This is the PR for SPI-NAND changes along with few spi changes. [trini: Re-sync changes for ls1012afrwy_qspi*_defconfig] Signed-off-by: Tom Rini <trini@konsulko.com>lime2-spi
commit
592cd5defd
@ -0,0 +1,473 @@ |
||||
// SPDX-License-Identifier: GPL-2.0+
|
||||
/*
|
||||
* mtd.c |
||||
* |
||||
* Generic command to handle basic operations on any memory device. |
||||
* |
||||
* Copyright: Bootlin, 2018 |
||||
* Author: Miquèl Raynal <miquel.raynal@bootlin.com> |
||||
*/ |
||||
|
||||
#include <command.h> |
||||
#include <common.h> |
||||
#include <console.h> |
||||
#include <malloc.h> |
||||
#include <mapmem.h> |
||||
#include <mtd.h> |
||||
|
||||
/* Convert a byte length into a number of pages (do_div divides in place). */
static uint mtd_len_to_pages(struct mtd_info *mtd, u64 len)
{
	u64 npages = len;

	do_div(npages, mtd->writesize);

	return npages;
}
||||
|
||||
/* True when @size is an exact multiple of the device's minimal I/O unit. */
static bool mtd_is_aligned_with_min_io_size(struct mtd_info *mtd, u64 size)
{
	/* do_div() returns the remainder of the division */
	u64 rem = do_div(size, mtd->writesize);

	return rem == 0;
}
||||
|
||||
/* True when @size is an exact multiple of the device's eraseblock size. */
static bool mtd_is_aligned_with_block_size(struct mtd_info *mtd, u64 size)
{
	/* do_div() returns the remainder of the division */
	u64 rem = do_div(size, mtd->erasesize);

	return rem == 0;
}
||||
|
||||
/*
 * Hex-dump @len bytes of @buf, 16 bytes per line (two groups of 8),
 * prefixing each line with @offset + position.
 *
 * Fix: the original unconditionally read 16 bytes per line, reading past
 * the end of @buf whenever @len was not a multiple of 16. Each inner loop
 * is now bounded by @len as well.
 */
static void mtd_dump_buf(const u8 *buf, uint len, uint offset)
{
	uint i, j;

	for (i = 0; i < len; i += 16) {
		printf("0x%08x:\t", offset + i);
		for (j = 0; j < 8 && i + j < len; j++)
			printf("%02x ", buf[i + j]);
		printf(" ");
		for (j = 8; j < 16 && i + j < len; j++)
			printf("%02x ", buf[i + j]);
		printf("\n");
	}
}
||||
|
||||
static void mtd_dump_device_buf(struct mtd_info *mtd, u64 start_off, |
||||
const u8 *buf, u64 len, bool woob) |
||||
{ |
||||
bool has_pages = mtd->type == MTD_NANDFLASH || |
||||
mtd->type == MTD_MLCNANDFLASH; |
||||
int npages = mtd_len_to_pages(mtd, len); |
||||
uint page; |
||||
|
||||
if (has_pages) { |
||||
for (page = 0; page < npages; page++) { |
||||
u64 data_off = page * mtd->writesize; |
||||
|
||||
printf("\nDump %d data bytes from 0x%08llx:\n", |
||||
mtd->writesize, start_off + data_off); |
||||
mtd_dump_buf(&buf[data_off], |
||||
mtd->writesize, start_off + data_off); |
||||
|
||||
if (woob) { |
||||
u64 oob_off = page * mtd->oobsize; |
||||
|
||||
printf("Dump %d OOB bytes from page at 0x%08llx:\n", |
||||
mtd->oobsize, start_off + data_off); |
||||
mtd_dump_buf(&buf[len + oob_off], |
||||
mtd->oobsize, 0); |
||||
} |
||||
} |
||||
} else { |
||||
printf("\nDump %lld data bytes from 0x%llx:\n", |
||||
len, start_off); |
||||
mtd_dump_buf(buf, len, start_off); |
||||
} |
||||
} |
||||
|
||||
static void mtd_show_parts(struct mtd_info *mtd, int level) |
||||
{ |
||||
struct mtd_info *part; |
||||
int i; |
||||
|
||||
list_for_each_entry(part, &mtd->partitions, node) { |
||||
for (i = 0; i < level; i++) |
||||
printf("\t"); |
||||
printf(" - 0x%012llx-0x%012llx : \"%s\"\n", |
||||
part->offset, part->offset + part->size, part->name); |
||||
|
||||
mtd_show_parts(part, level + 1); |
||||
} |
||||
} |
||||
|
||||
static void mtd_show_device(struct mtd_info *mtd) |
||||
{ |
||||
/* Device */ |
||||
printf("* %s\n", mtd->name); |
||||
#if defined(CONFIG_DM) |
||||
if (mtd->dev) { |
||||
printf(" - device: %s\n", mtd->dev->name); |
||||
printf(" - parent: %s\n", mtd->dev->parent->name); |
||||
printf(" - driver: %s\n", mtd->dev->driver->name); |
||||
} |
||||
#endif |
||||
|
||||
/* MTD device information */ |
||||
printf(" - type: "); |
||||
switch (mtd->type) { |
||||
case MTD_RAM: |
||||
printf("RAM\n"); |
||||
break; |
||||
case MTD_ROM: |
||||
printf("ROM\n"); |
||||
break; |
||||
case MTD_NORFLASH: |
||||
printf("NOR flash\n"); |
||||
break; |
||||
case MTD_NANDFLASH: |
||||
printf("NAND flash\n"); |
||||
break; |
||||
case MTD_DATAFLASH: |
||||
printf("Data flash\n"); |
||||
break; |
||||
case MTD_UBIVOLUME: |
||||
printf("UBI volume\n"); |
||||
break; |
||||
case MTD_MLCNANDFLASH: |
||||
printf("MLC NAND flash\n"); |
||||
break; |
||||
case MTD_ABSENT: |
||||
default: |
||||
printf("Unknown\n"); |
||||
break; |
||||
} |
||||
|
||||
printf(" - block size: 0x%x bytes\n", mtd->erasesize); |
||||
printf(" - min I/O: 0x%x bytes\n", mtd->writesize); |
||||
|
||||
if (mtd->oobsize) { |
||||
printf(" - OOB size: %u bytes\n", mtd->oobsize); |
||||
printf(" - OOB available: %u bytes\n", mtd->oobavail); |
||||
} |
||||
|
||||
if (mtd->ecc_strength) { |
||||
printf(" - ECC strength: %u bits\n", mtd->ecc_strength); |
||||
printf(" - ECC step size: %u bytes\n", mtd->ecc_step_size); |
||||
printf(" - bitflip threshold: %u bits\n", |
||||
mtd->bitflip_threshold); |
||||
} |
||||
|
||||
printf(" - 0x%012llx-0x%012llx : \"%s\"\n", |
||||
mtd->offset, mtd->offset + mtd->size, mtd->name); |
||||
|
||||
/* MTD partitions, if any */ |
||||
mtd_show_parts(mtd, 1); |
||||
} |
||||
|
||||
/* Logic taken from fs/ubifs/recovery.c:is_empty() */ |
||||
static bool mtd_oob_write_is_empty(struct mtd_oob_ops *op) |
||||
{ |
||||
int i; |
||||
|
||||
for (i = 0; i < op->len; i++) |
||||
if (op->datbuf[i] != 0xff) |
||||
return false; |
||||
|
||||
for (i = 0; i < op->ooblen; i++) |
||||
if (op->oobbuf[i] != 0xff) |
||||
return false; |
||||
|
||||
return true; |
||||
} |
||||
|
||||
static int do_mtd_list(void) |
||||
{ |
||||
struct mtd_info *mtd; |
||||
int dev_nb = 0; |
||||
|
||||
/* Ensure all devices (and their partitions) are probed */ |
||||
mtd_probe_devices(); |
||||
|
||||
printf("List of MTD devices:\n"); |
||||
mtd_for_each_device(mtd) { |
||||
if (!mtd_is_partition(mtd)) |
||||
mtd_show_device(mtd); |
||||
|
||||
dev_nb++; |
||||
} |
||||
|
||||
if (!dev_nb) { |
||||
printf("No MTD device found\n"); |
||||
return CMD_RET_FAILURE; |
||||
} |
||||
|
||||
return CMD_RET_SUCCESS; |
||||
} |
||||
|
||||
/*
 * Wrapper around mtd_write_oob() that, unless @write_empty_pages is set,
 * skips all-0xff pages by simulating a successful write (retlen/oobretlen
 * are filled in so the caller's accounting still advances).
 *
 * Return: 0 on success (or skip), a negative error code otherwise.
 */
static int mtd_special_write_oob(struct mtd_info *mtd, u64 off,
				 struct mtd_oob_ops *io_op,
				 bool write_empty_pages, bool woob)
{
	if (!write_empty_pages && mtd_oob_write_is_empty(io_op)) {
		io_op->retlen = mtd->writesize;
		io_op->oobretlen = woob ? mtd->oobsize : 0;
		return 0;
	}

	return mtd_write_oob(mtd, off, io_op);
}
||||
|
||||
/*
 * Top-level handler for the "mtd" command.
 *
 * Sub-commands: list, read/dump/write (with optional .raw/.oob/.dontskipff
 * suffixes), erase (optional .dontskipbad) and bad.
 *
 * Fixes versus the original:
 *  - "erase" was matched with strcmp(), so "erase.dontskipbad" could never
 *    match and the scrub option was dead code; use strncmp() like the other
 *    sub-commands.
 *  - 'ret' was read uninitialized when the requested length was 0 (the
 *    read/write and erase loops never ran); it is now initialized to 0.
 *  - a mid-loop read/write failure returned immediately, leaking the
 *    kmalloc'd dump buffer (or the sysmem mapping); we now break out and
 *    go through the common cleanup path.
 */
static int do_mtd(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
	struct mtd_info *mtd;
	const char *cmd;
	char *mtd_name;

	/* All MTD commands need at least two arguments */
	if (argc < 2)
		return CMD_RET_USAGE;

	/* Parse the command name and its optional suffixes */
	cmd = argv[1];

	/* List the MTD devices if that is what the user wants */
	if (strcmp(cmd, "list") == 0)
		return do_mtd_list();

	/*
	 * The remaining commands require also at least a device ID.
	 * Check the selected device is valid. Ensure it is probed.
	 */
	if (argc < 3)
		return CMD_RET_USAGE;

	mtd_name = argv[2];
	mtd_probe_devices();
	mtd = get_mtd_device_nm(mtd_name);
	if (IS_ERR_OR_NULL(mtd)) {
		printf("MTD device %s not found, ret %ld\n",
		       mtd_name, PTR_ERR(mtd));
		return CMD_RET_FAILURE;
	}
	put_mtd_device(mtd);

	argc -= 3;
	argv += 3;

	/* Do the parsing */
	if (!strncmp(cmd, "read", 4) || !strncmp(cmd, "dump", 4) ||
	    !strncmp(cmd, "write", 5)) {
		bool has_pages = mtd->type == MTD_NANDFLASH ||
				 mtd->type == MTD_MLCNANDFLASH;
		bool dump, read, raw, woob, write_empty_pages;
		struct mtd_oob_ops io_op = {};
		uint user_addr = 0, npages;
		u64 start_off, off, len, remaining, default_len;
		u32 oob_len;
		u8 *buf;
		int ret = 0;	/* len == 0 skips the loop: keep ret defined */

		dump = !strncmp(cmd, "dump", 4);
		read = dump || !strncmp(cmd, "read", 4);
		raw = strstr(cmd, ".raw");
		woob = strstr(cmd, ".oob");
		write_empty_pages = !has_pages || strstr(cmd, ".dontskipff");

		/* read/write need a RAM address; dump does not */
		if (!dump) {
			if (!argc)
				return CMD_RET_USAGE;

			user_addr = simple_strtoul(argv[0], NULL, 16);
			argc--;
			argv++;
		}

		start_off = argc > 0 ? simple_strtoul(argv[0], NULL, 16) : 0;
		if (!mtd_is_aligned_with_min_io_size(mtd, start_off)) {
			printf("Offset not aligned with a page (0x%x)\n",
			       mtd->writesize);
			return CMD_RET_FAILURE;
		}

		default_len = dump ? mtd->writesize : mtd->size;
		len = argc > 1 ? simple_strtoul(argv[1], NULL, 16) :
				 default_len;
		if (!mtd_is_aligned_with_min_io_size(mtd, len)) {
			len = round_up(len, mtd->writesize);
			printf("Size not on a page boundary (0x%x), rounding to 0x%llx\n",
			       mtd->writesize, len);
		}

		remaining = len;
		npages = mtd_len_to_pages(mtd, len);
		oob_len = woob ? npages * mtd->oobsize : 0;

		/* dump allocates its own buffer; read/write map user memory */
		if (dump)
			buf = kmalloc(len + oob_len, GFP_KERNEL);
		else
			buf = map_sysmem(user_addr, 0);

		if (!buf) {
			printf("Could not map/allocate the user buffer\n");
			return CMD_RET_FAILURE;
		}

		if (has_pages)
			printf("%s %lld byte(s) (%d page(s)) at offset 0x%08llx%s%s%s\n",
			       read ? "Reading" : "Writing", len, npages, start_off,
			       raw ? " [raw]" : "", woob ? " [oob]" : "",
			       !read && write_empty_pages ? " [dontskipff]" : "");
		else
			printf("%s %lld byte(s) at offset 0x%08llx\n",
			       read ? "Reading" : "Writing", len, start_off);

		io_op.mode = raw ? MTD_OPS_RAW : MTD_OPS_AUTO_OOB;
		io_op.len = has_pages ? mtd->writesize : len;
		io_op.ooblen = woob ? mtd->oobsize : 0;
		io_op.datbuf = buf;
		io_op.oobbuf = woob ? &buf[len] : NULL;

		/* Search for the first good block after the given offset */
		off = start_off;
		while (mtd_block_isbad(mtd, off))
			off += mtd->erasesize;

		/* Loop over the pages to do the actual read/write */
		while (remaining) {
			/* Skip the block if it is bad */
			if (mtd_is_aligned_with_block_size(mtd, off) &&
			    mtd_block_isbad(mtd, off)) {
				off += mtd->erasesize;
				continue;
			}

			if (read)
				ret = mtd_read_oob(mtd, off, &io_op);
			else
				ret = mtd_special_write_oob(mtd, off, &io_op,
							    write_empty_pages,
							    woob);

			if (ret) {
				printf("Failure while %s at offset 0x%llx\n",
				       read ? "reading" : "writing", off);
				/* break (not return) so buf gets released */
				break;
			}

			off += io_op.retlen;
			remaining -= io_op.retlen;
			io_op.datbuf += io_op.retlen;
			io_op.oobbuf += io_op.oobretlen;
		}

		if (!ret && dump)
			mtd_dump_device_buf(mtd, start_off, buf, len, woob);

		if (dump)
			kfree(buf);
		else
			unmap_sysmem(buf);

		if (ret) {
			printf("%s on %s failed with error %d\n",
			       read ? "Read" : "Write", mtd->name, ret);
			return CMD_RET_FAILURE;
		}

	} else if (!strncmp(cmd, "erase", 5)) {
		bool scrub = strstr(cmd, ".dontskipbad");
		struct erase_info erase_op = {};
		u64 off, len;
		int ret = 0;	/* len == 0 skips the loop: keep ret defined */

		off = argc > 0 ? simple_strtoul(argv[0], NULL, 16) : 0;
		len = argc > 1 ? simple_strtoul(argv[1], NULL, 16) : mtd->size;

		if (!mtd_is_aligned_with_block_size(mtd, off)) {
			printf("Offset not aligned with a block (0x%x)\n",
			       mtd->erasesize);
			return CMD_RET_FAILURE;
		}

		if (!mtd_is_aligned_with_block_size(mtd, len)) {
			printf("Size not a multiple of a block (0x%x)\n",
			       mtd->erasesize);
			return CMD_RET_FAILURE;
		}

		printf("Erasing 0x%08llx ... 0x%08llx (%d eraseblock(s))\n",
		       off, off + len - 1, mtd_div_by_eb(len, mtd));

		erase_op.mtd = mtd;
		erase_op.addr = off;
		erase_op.len = len;
		erase_op.scrub = scrub;

		while (erase_op.len) {
			ret = mtd_erase(mtd, &erase_op);

			/* Abort if its not a bad block error */
			if (ret != -EIO)
				break;

			printf("Skipping bad block at 0x%08llx\n",
			       erase_op.fail_addr);

			/* Skip bad block and continue behind it */
			erase_op.len -= erase_op.fail_addr - erase_op.addr;
			erase_op.len -= mtd->erasesize;
			erase_op.addr = erase_op.fail_addr + mtd->erasesize;
		}

		if (ret && ret != -EIO)
			return CMD_RET_FAILURE;
	} else if (!strcmp(cmd, "bad")) {
		loff_t off;

		if (!mtd_can_have_bb(mtd)) {
			printf("Only NAND-based devices can have bad blocks\n");
			return CMD_RET_SUCCESS;
		}

		printf("MTD device %s bad blocks list:\n", mtd->name);
		for (off = 0; off < mtd->size; off += mtd->erasesize)
			if (mtd_block_isbad(mtd, off))
				printf("\t0x%08llx\n", off);
	} else {
		return CMD_RET_USAGE;
	}

	return CMD_RET_SUCCESS;
}
||||
|
||||
/*
 * Usage text for the "mtd" command; compiled down to an empty string when
 * CONFIG_SYS_LONGHELP is disabled to save binary space.
 */
static char mtd_help_text[] =
#ifdef CONFIG_SYS_LONGHELP
	"- generic operations on memory technology devices\n\n"
	"mtd list\n"
	"mtd read[.raw][.oob] <name> <addr> [<off> [<size>]]\n"
	"mtd dump[.raw][.oob] <name> [<off> [<size>]]\n"
	"mtd write[.raw][.oob][.dontskipff] <name> <addr> [<off> [<size>]]\n"
	"mtd erase[.dontskipbad] <name> [<off> [<size>]]\n"
	"\n"
	"Specific functions:\n"
	"mtd bad <name>\n"
	"\n"
	"With:\n"
	"\t<name>: NAND partition/chip name\n"
	"\t<addr>: user address from/to which data will be retrieved/stored\n"
	"\t<off>: offset in <name> in bytes (default: start of the part)\n"
	"\t\t* must be block-aligned for erase\n"
	"\t\t* must be page-aligned otherwise\n"
	"\t<size>: length of the operation in bytes (default: the entire device)\n"
	"\t\t* must be a multiple of a block for erase\n"
	"\t\t* must be a multiple of a page otherwise (special case: default is a page with dump)\n"
	"\n"
	"The .dontskipff option forces writing empty pages, don't use it if unsure.\n"
#endif
	"";

/* Register the "mtd" command: max 10 args, repeatable, handled by do_mtd(). */
U_BOOT_CMD(mtd, 10, 1, do_mtd, "MTD utils", mtd_help_text);
@ -0,0 +1,5 @@ |
||||
SPI NAND flash |
||||
|
||||
Required properties: |
||||
- compatible: should be "spi-nand" |
||||
- reg: should encode the chip-select line used to access the NAND chip |
@ -1,297 +1,6 @@ |
||||
config MTD_NAND_CORE |
||||
tristate |
||||
|
||||
menuconfig NAND |
||||
bool "NAND Device Support" |
||||
if NAND |
||||
source "drivers/mtd/nand/raw/Kconfig" |
||||
|
||||
config SYS_NAND_SELF_INIT |
||||
bool |
||||
help |
||||
This option, if enabled, provides more flexible and linux-like |
||||
NAND initialization process. |
||||
|
||||
config NAND_ATMEL |
||||
bool "Support Atmel NAND controller" |
||||
imply SYS_NAND_USE_FLASH_BBT |
||||
help |
||||
Enable this driver for NAND flash platforms using an Atmel NAND |
||||
controller. |
||||
|
||||
config NAND_DAVINCI |
||||
bool "Support TI Davinci NAND controller" |
||||
help |
||||
Enable this driver for NAND flash controllers available in TI Davinci |
||||
and Keystone2 platforms |
||||
|
||||
config NAND_DENALI |
||||
bool |
||||
select SYS_NAND_SELF_INIT |
||||
imply CMD_NAND |
||||
|
||||
config NAND_DENALI_DT |
||||
bool "Support Denali NAND controller as a DT device" |
||||
select NAND_DENALI |
||||
depends on OF_CONTROL && DM |
||||
help |
||||
Enable the driver for NAND flash on platforms using a Denali NAND |
||||
controller as a DT device. |
||||
|
||||
config NAND_DENALI_SPARE_AREA_SKIP_BYTES |
||||
int "Number of bytes skipped in OOB area" |
||||
depends on NAND_DENALI |
||||
range 0 63 |
||||
help |
||||
This option specifies the number of bytes to skip from the beginning |
||||
of OOB area before last ECC sector data starts. This is potentially |
||||
used to preserve the bad block marker in the OOB area. |
||||
|
||||
config NAND_LPC32XX_SLC |
||||
bool "Support LPC32XX_SLC controller" |
||||
help |
||||
Enable the LPC32XX SLC NAND controller. |
||||
|
||||
config NAND_OMAP_GPMC |
||||
bool "Support OMAP GPMC NAND controller" |
||||
depends on ARCH_OMAP2PLUS |
||||
help |
||||
Enables omap_gpmc.c driver for OMAPx and AMxxxx platforms. |
||||
GPMC controller is used for parallel NAND flash devices, and can |
||||
do ECC calculation (not ECC error detection) for HAM1, BCH4, BCH8 |
||||
and BCH16 ECC algorithms. |
||||
|
||||
config NAND_OMAP_GPMC_PREFETCH |
||||
bool "Enable GPMC Prefetch" |
||||
depends on NAND_OMAP_GPMC |
||||
default y |
||||
help |
||||
On OMAP platforms that use the GPMC controller |
||||
(CONFIG_NAND_OMAP_GPMC_PREFETCH), this options enables the code that |
||||
uses the prefetch mode to speed up read operations. |
||||
|
||||
config NAND_OMAP_ELM |
||||
bool "Enable ELM driver for OMAPxx and AMxx platforms." |
||||
depends on NAND_OMAP_GPMC && !OMAP34XX |
||||
help |
||||
ELM controller is used for ECC error detection (not ECC calculation) |
||||
of BCH4, BCH8 and BCH16 ECC algorithms. |
||||
Some legacy platforms like OMAP3xx do not have in-built ELM h/w engine, |
||||
thus such SoC platforms need to depend on software library for ECC error |
||||
detection. However ECC calculation on such platforms would still be
||||
done by GPMC controller. |
||||
|
||||
config NAND_VF610_NFC |
||||
bool "Support for Freescale NFC for VF610" |
||||
select SYS_NAND_SELF_INIT |
||||
imply CMD_NAND |
||||
help |
||||
Enables support for NAND Flash Controller on some Freescale |
||||
processors like the VF610, MCF54418 or Kinetis K70. |
||||
The driver supports a maximum 2k page size. The driver |
||||
currently does not support hardware ECC. |
||||
|
||||
choice |
||||
prompt "Hardware ECC strength" |
||||
depends on NAND_VF610_NFC |
||||
default SYS_NAND_VF610_NFC_45_ECC_BYTES |
||||
help |
||||
Select the ECC strength used in the hardware BCH ECC block. |
||||
|
||||
config SYS_NAND_VF610_NFC_45_ECC_BYTES |
||||
bool "24-error correction (45 ECC bytes)" |
||||
|
||||
config SYS_NAND_VF610_NFC_60_ECC_BYTES |
||||
bool "32-error correction (60 ECC bytes)" |
||||
|
||||
endchoice |
||||
|
||||
config NAND_PXA3XX |
||||
bool "Support for NAND on PXA3xx and Armada 370/XP/38x" |
||||
select SYS_NAND_SELF_INIT |
||||
imply CMD_NAND |
||||
help |
||||
This enables the driver for the NAND flash device found on |
||||
PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2). |
||||
|
||||
config NAND_SUNXI |
||||
bool "Support for NAND on Allwinner SoCs" |
||||
default ARCH_SUNXI |
||||
depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I || MACH_SUN8I |
||||
select SYS_NAND_SELF_INIT |
||||
select SYS_NAND_U_BOOT_LOCATIONS |
||||
select SPL_NAND_SUPPORT |
||||
imply CMD_NAND |
||||
---help--- |
||||
Enable support for NAND. This option enables the standard and |
||||
SPL drivers. |
||||
The SPL driver only supports reading from the NAND using DMA |
||||
transfers. |
||||
|
||||
if NAND_SUNXI |
||||
|
||||
config NAND_SUNXI_SPL_ECC_STRENGTH |
||||
int "Allwinner NAND SPL ECC Strength" |
||||
default 64 |
||||
|
||||
config NAND_SUNXI_SPL_ECC_SIZE |
||||
int "Allwinner NAND SPL ECC Step Size" |
||||
default 1024 |
||||
|
||||
config NAND_SUNXI_SPL_USABLE_PAGE_SIZE |
||||
int "Allwinner NAND SPL Usable Page Size" |
||||
default 1024 |
||||
|
||||
endif |
||||
|
||||
config NAND_ARASAN |
||||
bool "Configure Arasan Nand" |
||||
select SYS_NAND_SELF_INIT |
||||
imply CMD_NAND |
||||
help |
||||
This enables Nand driver support for Arasan nand flash |
||||
controller. This uses the hardware ECC for read and |
||||
write operations. |
||||
|
||||
config NAND_MXC |
||||
bool "MXC NAND support" |
||||
depends on CPU_ARM926EJS || CPU_ARM1136 || MX5 |
||||
imply CMD_NAND |
||||
help |
||||
This enables the NAND driver for the NAND flash controller on the |
||||
i.MX27 / i.MX31 / i.MX5 processors.
||||
|
||||
config NAND_MXS |
||||
bool "MXS NAND support" |
||||
depends on MX23 || MX28 || MX6 || MX7 |
||||
select SYS_NAND_SELF_INIT |
||||
imply CMD_NAND |
||||
select APBH_DMA |
||||
select APBH_DMA_BURST if ARCH_MX6 || ARCH_MX7 |
||||
select APBH_DMA_BURST8 if ARCH_MX6 || ARCH_MX7 |
||||
help |
||||
This enables NAND driver for the NAND flash controller on the |
||||
MXS processors. |
||||
|
||||
if NAND_MXS |
||||
|
||||
config NAND_MXS_DT |
||||
bool "Support MXS NAND controller as a DT device" |
||||
depends on OF_CONTROL && MTD |
||||
help |
||||
Enable the driver for MXS NAND flash on platforms using |
||||
device tree. |
||||
|
||||
config NAND_MXS_USE_MINIMUM_ECC |
||||
bool "Use minimum ECC strength supported by the controller" |
||||
default false |
||||
|
||||
endif |
||||
|
||||
config NAND_ZYNQ |
||||
bool "Support for Zynq Nand controller" |
||||
select SYS_NAND_SELF_INIT |
||||
imply CMD_NAND |
||||
help |
||||
This enables Nand driver support for Nand flash controller |
||||
found on Zynq SoC. |
||||
|
||||
config NAND_ZYNQ_USE_BOOTLOADER1_TIMINGS |
||||
bool "Enable use of 1st stage bootloader timing for NAND" |
||||
depends on NAND_ZYNQ |
||||
help |
||||
This flag prevents U-Boot from reconfiguring the NAND flash controller and reuses
||||
the NAND timing from 1st stage bootloader. |
||||
|
||||
comment "Generic NAND options" |
||||
|
||||
config SYS_NAND_BLOCK_SIZE |
||||
hex "NAND chip eraseblock size" |
||||
depends on ARCH_SUNXI |
||||
help |
||||
Number of data bytes in one eraseblock for the NAND chip on the |
||||
board. This is the multiple of NAND_PAGE_SIZE and the number of |
||||
pages. |
||||
|
||||
config SYS_NAND_PAGE_SIZE |
||||
hex "NAND chip page size" |
||||
depends on ARCH_SUNXI |
||||
help |
||||
Number of data bytes in one page for the NAND chip on the |
||||
board, not including the OOB area. |
||||
|
||||
config SYS_NAND_OOBSIZE |
||||
hex "NAND chip OOB size" |
||||
depends on ARCH_SUNXI |
||||
help |
||||
Number of bytes in the Out-Of-Band area for the NAND chip on |
||||
the board. |
||||
|
||||
# Enhance depends when converting drivers to Kconfig which use this config |
||||
# option (mxc_nand, ndfc, omap_gpmc). |
||||
config SYS_NAND_BUSWIDTH_16BIT |
||||
bool "Use 16-bit NAND interface" |
||||
depends on NAND_VF610_NFC || NAND_OMAP_GPMC || NAND_MXC || ARCH_DAVINCI |
||||
help |
||||
Indicates that NAND device has 16-bit wide data-bus. In absence of this |
||||
config, bus-width of NAND device is assumed to be either 8-bit and later |
||||
determined by reading ONFI params. |
||||
Above config is useful when NAND device's bus-width information cannot |
||||
be determined from on-chip ONFI params, like in following scenarios: |
||||
- SPL boot does not support reading of ONFI parameters. This is done to |
||||
keep SPL code foot-print small. |
||||
- In current U-Boot flow using nand_init(), driver initialization |
||||
happens in board_nand_init() which is called before any device probe |
||||
(nand_scan_ident + nand_scan_tail), thus device's ONFI parameters are |
||||
not available while configuring controller. So a static CONFIG_NAND_xx |
||||
is needed to know the device's bus-width in advance. |
||||
|
||||
if SPL |
||||
|
||||
config SYS_NAND_U_BOOT_LOCATIONS |
||||
bool "Define U-boot binaries locations in NAND" |
||||
help |
||||
Enable CONFIG_SYS_NAND_U_BOOT_OFFS though Kconfig. |
||||
This option should not be enabled when compiling U-boot for boards |
||||
defining CONFIG_SYS_NAND_U_BOOT_OFFS in their include/configs/<board>.h |
||||
file. |
||||
|
||||
config SYS_NAND_U_BOOT_OFFS |
||||
hex "Location in NAND to read U-Boot from" |
||||
default 0x800000 if NAND_SUNXI |
||||
depends on SYS_NAND_U_BOOT_LOCATIONS |
||||
help |
||||
Set the offset from the start of the nand where u-boot should be |
||||
loaded from. |
||||
|
||||
config SYS_NAND_U_BOOT_OFFS_REDUND |
||||
hex "Location in NAND to read U-Boot from" |
||||
default SYS_NAND_U_BOOT_OFFS |
||||
depends on SYS_NAND_U_BOOT_LOCATIONS |
||||
help |
||||
Set the offset from the start of the nand where the redundant u-boot |
||||
should be loaded from. |
||||
|
||||
config SPL_NAND_AM33XX_BCH |
||||
bool "Enables SPL-NAND driver which supports ELM based" |
||||
depends on NAND_OMAP_GPMC && !OMAP34XX |
||||
default y |
||||
help |
||||
Hardware ECC correction. This is useful for platforms which have ELM |
||||
hardware engine and use NAND boot mode. |
||||
Some legacy platforms like OMAP3xx do not have in-built ELM h/w engine, |
||||
so those platforms should use CONFIG_SPL_NAND_SIMPLE for enabling |
||||
SPL-NAND driver with software ECC correction support. |
||||
|
||||
config SPL_NAND_DENALI |
||||
bool "Support Denali NAND controller for SPL" |
||||
help |
||||
This is a small implementation of the Denali NAND controller |
||||
for use on SPL. |
||||
|
||||
config SPL_NAND_SIMPLE |
||||
bool "Use simple SPL NAND driver" |
||||
depends on !SPL_NAND_AM33XX_BCH |
||||
help |
||||
Support for NAND boot using simple NAND drivers that |
||||
expose the cmd_ctrl() interface. |
||||
endif |
||||
|
||||
endif # if NAND |
||||
source "drivers/mtd/nand/spi/Kconfig" |
||||
|
@ -1,77 +1,5 @@ |
||||
# SPDX-License-Identifier: GPL-2.0+
|
||||
#
|
||||
# (C) Copyright 2006
|
||||
# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
|
||||
|
||||
ifdef CONFIG_SPL_BUILD |
||||
|
||||
ifdef CONFIG_SPL_NAND_DRIVERS |
||||
NORMAL_DRIVERS=y
|
||||
endif |
||||
|
||||
obj-$(CONFIG_SPL_NAND_AM33XX_BCH) += am335x_spl_bch.o
|
||||
obj-$(CONFIG_SPL_NAND_DENALI) += denali_spl.o
|
||||
obj-$(CONFIG_SPL_NAND_SIMPLE) += nand_spl_simple.o
|
||||
obj-$(CONFIG_SPL_NAND_LOAD) += nand_spl_load.o
|
||||
obj-$(CONFIG_SPL_NAND_ECC) += nand_ecc.o
|
||||
obj-$(CONFIG_SPL_NAND_BASE) += nand_base.o
|
||||
obj-$(CONFIG_SPL_NAND_IDENT) += nand_ids.o nand_timings.o
|
||||
obj-$(CONFIG_SPL_NAND_INIT) += nand.o
|
||||
ifeq ($(CONFIG_SPL_ENV_SUPPORT),y) |
||||
obj-$(CONFIG_ENV_IS_IN_NAND) += nand_util.o
|
||||
endif |
||||
|
||||
else # not spl
|
||||
|
||||
NORMAL_DRIVERS=y
|
||||
|
||||
obj-y += nand.o
|
||||
obj-y += nand_bbt.o
|
||||
obj-y += nand_ids.o
|
||||
obj-y += nand_util.o
|
||||
obj-y += nand_ecc.o
|
||||
obj-y += nand_base.o
|
||||
obj-y += nand_timings.o
|
||||
|
||||
endif # not spl
|
||||
|
||||
ifdef NORMAL_DRIVERS |
||||
|
||||
obj-$(CONFIG_NAND_ECC_BCH) += nand_bch.o
|
||||
|
||||
obj-$(CONFIG_NAND_ATMEL) += atmel_nand.o
|
||||
obj-$(CONFIG_NAND_ARASAN) += arasan_nfc.o
|
||||
obj-$(CONFIG_NAND_DAVINCI) += davinci_nand.o
|
||||
obj-$(CONFIG_NAND_DENALI) += denali.o
|
||||
obj-$(CONFIG_NAND_DENALI_DT) += denali_dt.o
|
||||
obj-$(CONFIG_NAND_FSL_ELBC) += fsl_elbc_nand.o
|
||||
obj-$(CONFIG_NAND_FSL_IFC) += fsl_ifc_nand.o
|
||||
obj-$(CONFIG_NAND_FSL_UPM) += fsl_upm.o
|
||||
obj-$(CONFIG_NAND_FSMC) += fsmc_nand.o
|
||||
obj-$(CONFIG_NAND_KB9202) += kb9202_nand.o
|
||||
obj-$(CONFIG_NAND_KIRKWOOD) += kirkwood_nand.o
|
||||
obj-$(CONFIG_NAND_KMETER1) += kmeter1_nand.o
|
||||
obj-$(CONFIG_NAND_LPC32XX_MLC) += lpc32xx_nand_mlc.o
|
||||
obj-$(CONFIG_NAND_LPC32XX_SLC) += lpc32xx_nand_slc.o
|
||||
obj-$(CONFIG_NAND_VF610_NFC) += vf610_nfc.o
|
||||
obj-$(CONFIG_NAND_MXC) += mxc_nand.o
|
||||
obj-$(CONFIG_NAND_MXS) += mxs_nand.o
|
||||
obj-$(CONFIG_NAND_MXS_DT) += mxs_nand_dt.o
|
||||
obj-$(CONFIG_NAND_PXA3XX) += pxa3xx_nand.o
|
||||
obj-$(CONFIG_NAND_SPEAR) += spr_nand.o
|
||||
obj-$(CONFIG_TEGRA_NAND) += tegra_nand.o
|
||||
obj-$(CONFIG_NAND_OMAP_GPMC) += omap_gpmc.o
|
||||
obj-$(CONFIG_NAND_OMAP_ELM) += omap_elm.o
|
||||
obj-$(CONFIG_NAND_PLAT) += nand_plat.o
|
||||
obj-$(CONFIG_NAND_SUNXI) += sunxi_nand.o
|
||||
obj-$(CONFIG_NAND_ZYNQ) += zynq_nand.o
|
||||
|
||||
else # minimal SPL drivers
|
||||
|
||||
obj-$(CONFIG_NAND_FSL_ELBC) += fsl_elbc_spl.o
|
||||
obj-$(CONFIG_NAND_FSL_IFC) += fsl_ifc_spl.o
|
||||
obj-$(CONFIG_NAND_MXC) += mxc_nand_spl.o
|
||||
obj-$(CONFIG_NAND_MXS) += mxs_nand_spl.o mxs_nand.o
|
||||
obj-$(CONFIG_NAND_SUNXI) += sunxi_nand_spl.o
|
||||
|
||||
endif # drivers
|
||||
nandcore-objs := core.o bbt.o
|
||||
obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
|
||||
obj-$(CONFIG_MTD_SPI_NAND) += spi/
|
||||
|
@ -0,0 +1,132 @@ |
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2017 Free Electrons |
||||
* |
||||
* Authors: |
||||
* Boris Brezillon <boris.brezillon@free-electrons.com> |
||||
* Peter Pan <peterpandong@micron.com> |
||||
*/ |
||||
|
||||
#define pr_fmt(fmt) "nand-bbt: " fmt |
||||
|
||||
#include <linux/mtd/nand.h> |
||||
#ifndef __UBOOT__ |
||||
#include <linux/slab.h> |
||||
#endif |
||||
|
||||
/**
|
||||
* nanddev_bbt_init() - Initialize the BBT (Bad Block Table) |
||||
* @nand: NAND device |
||||
* |
||||
* Initialize the in-memory BBT. |
||||
* |
||||
* Return: 0 in case of success, a negative error code otherwise. |
||||
*/ |
||||
int nanddev_bbt_init(struct nand_device *nand) |
||||
{ |
||||
unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS); |
||||
unsigned int nblocks = nanddev_neraseblocks(nand); |
||||
unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block, |
||||
BITS_PER_LONG); |
||||
|
||||
nand->bbt.cache = kzalloc(nwords, GFP_KERNEL); |
||||
if (!nand->bbt.cache) |
||||
return -ENOMEM; |
||||
|
||||
return 0; |
||||
} |
||||
EXPORT_SYMBOL_GPL(nanddev_bbt_init); |
||||
|
||||
/**
 * nanddev_bbt_cleanup() - Cleanup the BBT (Bad Block Table)
 * @nand: NAND device
 *
 * Undoes what has been done in nanddev_bbt_init()
 */
void nanddev_bbt_cleanup(struct nand_device *nand)
{
	/* kfree(NULL) is a no-op, so an uninitialized cache is safe here */
	kfree(nand->bbt.cache);
}
EXPORT_SYMBOL_GPL(nanddev_bbt_cleanup);
||||
|
||||
/**
 * nanddev_bbt_update() - Update a BBT
 * @nand: nand device
 *
 * Update the BBT. Currently a NOP function since on-flash bbt is not yet
 * supported.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_bbt_update(struct nand_device *nand)
{
	/* Intentional no-op: nothing to flush until on-flash BBT exists */
	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_update);
||||
|
||||
/**
 * nanddev_bbt_get_block_status() - Return the status of an eraseblock
 * @nand: nand device
 * @entry: the BBT entry
 *
 * Return: a positive number nand_bbt_block_status status or -%ERANGE if @entry
 *	   is bigger than the BBT size.
 */
int nanddev_bbt_get_block_status(const struct nand_device *nand,
				 unsigned int entry)
{
	/* Each block takes bits_per_block bits in the packed bitmap cache */
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned long *pos = nand->bbt.cache +
			     ((entry * bits_per_block) / BITS_PER_LONG);
	unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
	unsigned long status;

	if (entry >= nanddev_neraseblocks(nand))
		return -ERANGE;

	/* An entry may straddle two words; stitch the high bits from pos[1] */
	status = pos[0] >> offs;
	if (bits_per_block + offs > BITS_PER_LONG)
		status |= pos[1] << (BITS_PER_LONG - offs);

	return status & GENMASK(bits_per_block - 1, 0);
}
EXPORT_SYMBOL_GPL(nanddev_bbt_get_block_status);
||||
|
||||
/**
 * nanddev_bbt_set_block_status() - Update the status of an eraseblock in the
 *				    in-memory BBT
 * @nand: nand device
 * @entry: the BBT entry to update
 * @status: the new status
 *
 * Update an entry of the in-memory BBT. If you want to push the updated BBT
 * the NAND you should call nanddev_bbt_update().
 *
 * Return: 0 in case of success or -%ERANGE if @entry is bigger than the BBT
 *	   size.
 */
int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
				 enum nand_bbt_block_status status)
{
	/* Each block takes bits_per_block bits in the packed bitmap cache */
	unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
	unsigned long *pos = nand->bbt.cache +
			     ((entry * bits_per_block) / BITS_PER_LONG);
	unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
	unsigned long val = status & GENMASK(bits_per_block - 1, 0);

	if (entry >= nanddev_neraseblocks(nand))
		return -ERANGE;

	/* Clear then set the bits of this entry that live in the first word */
	pos[0] &= ~GENMASK(offs + bits_per_block - 1, offs);
	pos[0] |= val << offs;

	/* The entry may straddle into the next word; update its low bits */
	if (bits_per_block + offs > BITS_PER_LONG) {
		unsigned int rbits = bits_per_block + offs - BITS_PER_LONG;

		pos[1] &= ~GENMASK(rbits - 1, 0);
		pos[1] |= val >> rbits;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_bbt_set_block_status);
@ -0,0 +1,243 @@ |
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2017 Free Electrons |
||||
* |
||||
* Authors: |
||||
* Boris Brezillon <boris.brezillon@free-electrons.com> |
||||
* Peter Pan <peterpandong@micron.com> |
||||
*/ |
||||
|
||||
#define pr_fmt(fmt) "nand: " fmt |
||||
|
||||
#ifndef __UBOOT__ |
||||
#include <linux/module.h> |
||||
#endif |
||||
#include <linux/mtd/nand.h> |
||||
|
||||
/**
|
||||
* nanddev_isbad() - Check if a block is bad |
||||
* @nand: NAND device |
||||
* @pos: position pointing to the block we want to check |
||||
* |
||||
* Return: true if the block is bad, false otherwise. |
||||
*/ |
||||
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos) |
||||
{ |
||||
if (nanddev_bbt_is_initialized(nand)) { |
||||
unsigned int entry; |
||||
int status; |
||||
|
||||
entry = nanddev_bbt_pos_to_entry(nand, pos); |
||||
status = nanddev_bbt_get_block_status(nand, entry); |
||||
/* Lazy block status retrieval */ |
||||
if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) { |
||||
if (nand->ops->isbad(nand, pos)) |
||||
status = NAND_BBT_BLOCK_FACTORY_BAD; |
||||
else |
||||
status = NAND_BBT_BLOCK_GOOD; |
||||
|
||||
nanddev_bbt_set_block_status(nand, entry, status); |
||||
} |
||||
|
||||
if (status == NAND_BBT_BLOCK_WORN || |
||||
status == NAND_BBT_BLOCK_FACTORY_BAD) |
||||
return true; |
||||
|
||||
return false; |
||||
} |
||||
|
||||
return nand->ops->isbad(nand, pos); |
||||
} |
||||
EXPORT_SYMBOL_GPL(nanddev_isbad); |
||||
|
||||
/**
|
||||
* nanddev_markbad() - Mark a block as bad |
||||
* @nand: NAND device |
||||
* @pos: position of the block to mark bad |
||||
* |
||||
* Mark a block bad. This function is updating the BBT if available and |
||||
* calls the low-level markbad hook (nand->ops->markbad()). |
||||
* |
||||
* Return: 0 in case of success, a negative error code otherwise. |
||||
*/ |
||||
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos) |
||||
{ |
||||
struct mtd_info *mtd = nanddev_to_mtd(nand); |
||||
unsigned int entry; |
||||
int ret = 0; |
||||
|
||||
if (nanddev_isbad(nand, pos)) |
||||
return 0; |
||||
|
||||
ret = nand->ops->markbad(nand, pos); |
||||
if (ret) |
||||
pr_warn("failed to write BBM to block @%llx (err = %d)\n", |
||||
nanddev_pos_to_offs(nand, pos), ret); |
||||
|
||||
if (!nanddev_bbt_is_initialized(nand)) |
||||
goto out; |
||||
|
||||
entry = nanddev_bbt_pos_to_entry(nand, pos); |
||||
ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN); |
||||
if (ret) |
||||
goto out; |
||||
|
||||
ret = nanddev_bbt_update(nand); |
||||
|
||||
out: |
||||
if (!ret) |
||||
mtd->ecc_stats.badblocks++; |
||||
|
||||
return ret; |
||||
} |
||||
EXPORT_SYMBOL_GPL(nanddev_markbad); |
||||
|
||||
/**
|
||||
* nanddev_isreserved() - Check whether an eraseblock is reserved or not |
||||
* @nand: NAND device |
||||
* @pos: NAND position to test |
||||
* |
||||
* Checks whether the eraseblock pointed by @pos is reserved or not. |
||||
* |
||||
* Return: true if the eraseblock is reserved, false otherwise. |
||||
*/ |
||||
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos) |
||||
{ |
||||
unsigned int entry; |
||||
int status; |
||||
|
||||
if (!nanddev_bbt_is_initialized(nand)) |
||||
return false; |
||||
|
||||
/* Return info from the table */ |
||||
entry = nanddev_bbt_pos_to_entry(nand, pos); |
||||
status = nanddev_bbt_get_block_status(nand, entry); |
||||
return status == NAND_BBT_BLOCK_RESERVED; |
||||
} |
||||
EXPORT_SYMBOL_GPL(nanddev_isreserved); |
||||
|
||||
/**
|
||||
* nanddev_erase() - Erase a NAND portion |
||||
* @nand: NAND device |
||||
* @pos: position of the block to erase |
||||
* |
||||
* Erases the block if it's not bad. |
||||
* |
||||
* Return: 0 in case of success, a negative error code otherwise. |
||||
*/ |
||||
int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos) |
||||
{ |
||||
if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) { |
||||
pr_warn("attempt to erase a bad/reserved block @%llx\n", |
||||
nanddev_pos_to_offs(nand, pos)); |
||||
return -EIO; |
||||
} |
||||
|
||||
return nand->ops->erase(nand, pos); |
||||
} |
||||
EXPORT_SYMBOL_GPL(nanddev_erase); |
||||
|
||||
/**
|
||||
* nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices |
||||
* @mtd: MTD device |
||||
* @einfo: erase request |
||||
* |
||||
* This is a simple mtd->_erase() implementation iterating over all blocks |
||||
* concerned by @einfo and calling nand->ops->erase() on each of them. |
||||
* |
||||
* Note that mtd->_erase should not be directly assigned to this helper, |
||||
* because there's no locking here. NAND specialized layers should instead |
||||
* implement there own wrapper around nanddev_mtd_erase() taking the |
||||
* appropriate lock before calling nanddev_mtd_erase(). |
||||
* |
||||
* Return: 0 in case of success, a negative error code otherwise. |
||||
*/ |
||||
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo) |
||||
{ |
||||
struct nand_device *nand = mtd_to_nanddev(mtd); |
||||
struct nand_pos pos, last; |
||||
int ret; |
||||
|
||||
nanddev_offs_to_pos(nand, einfo->addr, &pos); |
||||
nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last); |
||||
while (nanddev_pos_cmp(&pos, &last) <= 0) { |
||||
ret = nanddev_erase(nand, &pos); |
||||
if (ret) { |
||||
einfo->fail_addr = nanddev_pos_to_offs(nand, &pos); |
||||
|
||||
return ret; |
||||
} |
||||
|
||||
nanddev_pos_next_eraseblock(nand, &pos); |
||||
} |
||||
|
||||
return 0; |
||||
} |
||||
EXPORT_SYMBOL_GPL(nanddev_mtd_erase); |
||||
|
||||
/**
|
||||
* nanddev_init() - Initialize a NAND device |
||||
* @nand: NAND device |
||||
* @ops: NAND device operations |
||||
* @owner: NAND device owner |
||||
* |
||||
* Initializes a NAND device object. Consistency checks are done on @ops and |
||||
* @nand->memorg. Also takes care of initializing the BBT. |
||||
* |
||||
* Return: 0 in case of success, a negative error code otherwise. |
||||
*/ |
||||
int nanddev_init(struct nand_device *nand, const struct nand_ops *ops, |
||||
struct module *owner) |
||||
{ |
||||
struct mtd_info *mtd = nanddev_to_mtd(nand); |
||||
struct nand_memory_organization *memorg = nanddev_get_memorg(nand); |
||||
|
||||
if (!nand || !ops) |
||||
return -EINVAL; |
||||
|
||||
if (!ops->erase || !ops->markbad || !ops->isbad) |
||||
return -EINVAL; |
||||
|
||||
if (!memorg->bits_per_cell || !memorg->pagesize || |
||||
!memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun || |
||||
!memorg->planes_per_lun || !memorg->luns_per_target || |
||||
!memorg->ntargets) |
||||
return -EINVAL; |
||||
|
||||
nand->rowconv.eraseblock_addr_shift = |
||||
fls(memorg->pages_per_eraseblock - 1); |
||||
nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) + |
||||
nand->rowconv.eraseblock_addr_shift; |
||||
|
||||
nand->ops = ops; |
||||
|
||||
mtd->type = memorg->bits_per_cell == 1 ? |
||||
MTD_NANDFLASH : MTD_MLCNANDFLASH; |
||||
mtd->flags = MTD_CAP_NANDFLASH; |
||||
mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock; |
||||
mtd->writesize = memorg->pagesize; |
||||
mtd->writebufsize = memorg->pagesize; |
||||
mtd->oobsize = memorg->oobsize; |
||||
mtd->size = nanddev_size(nand); |
||||
mtd->owner = owner; |
||||
|
||||
return nanddev_bbt_init(nand); |
||||
} |
||||
EXPORT_SYMBOL_GPL(nanddev_init); |
||||
|
||||
/**
|
||||
* nanddev_cleanup() - Release resources allocated in nanddev_init() |
||||
* @nand: NAND device |
||||
* |
||||
* Basically undoes what has been done in nanddev_init(). |
||||
*/ |
||||
void nanddev_cleanup(struct nand_device *nand) |
||||
{ |
||||
if (nanddev_bbt_is_initialized(nand)) |
||||
nanddev_bbt_cleanup(nand); |
||||
} |
||||
EXPORT_SYMBOL_GPL(nanddev_cleanup); |
||||
|
||||
MODULE_DESCRIPTION("Generic NAND framework"); |
||||
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>"); |
||||
MODULE_LICENSE("GPL v2"); |
@ -0,0 +1,297 @@ |
||||
|
||||
menuconfig NAND |
||||
bool "Raw NAND Device Support" |
||||
if NAND |
||||
|
||||
config SYS_NAND_SELF_INIT |
||||
bool |
||||
help |
||||
This option, if enabled, provides more flexible and linux-like |
||||
NAND initialization process. |
||||
|
||||
config NAND_ATMEL |
||||
bool "Support Atmel NAND controller" |
||||
imply SYS_NAND_USE_FLASH_BBT |
||||
help |
||||
Enable this driver for NAND flash platforms using an Atmel NAND |
||||
controller. |
||||
|
||||
config NAND_DAVINCI |
||||
bool "Support TI Davinci NAND controller" |
||||
help |
||||
Enable this driver for NAND flash controllers available in TI Davinci |
||||
and Keystone2 platforms |
||||
|
||||
config NAND_DENALI |
||||
bool |
||||
select SYS_NAND_SELF_INIT |
||||
imply CMD_NAND |
||||
|
||||
config NAND_DENALI_DT |
||||
bool "Support Denali NAND controller as a DT device" |
||||
select NAND_DENALI |
||||
depends on OF_CONTROL && DM |
||||
help |
||||
Enable the driver for NAND flash on platforms using a Denali NAND |
||||
controller as a DT device. |
||||
|
||||
config NAND_DENALI_SPARE_AREA_SKIP_BYTES |
||||
int "Number of bytes skipped in OOB area" |
||||
depends on NAND_DENALI |
||||
range 0 63 |
||||
help |
||||
This option specifies the number of bytes to skip from the beginning |
||||
of OOB area before last ECC sector data starts. This is potentially |
||||
used to preserve the bad block marker in the OOB area. |
||||
|
||||
config NAND_LPC32XX_SLC |
||||
bool "Support LPC32XX_SLC controller" |
||||
help |
||||
Enable the LPC32XX SLC NAND controller. |
||||
|
||||
config NAND_OMAP_GPMC |
||||
bool "Support OMAP GPMC NAND controller" |
||||
depends on ARCH_OMAP2PLUS |
||||
help |
||||
Enables omap_gpmc.c driver for OMAPx and AMxxxx platforms. |
||||
GPMC controller is used for parallel NAND flash devices, and can |
||||
do ECC calculation (not ECC error detection) for HAM1, BCH4, BCH8 |
||||
and BCH16 ECC algorithms. |
||||
|
||||
config NAND_OMAP_GPMC_PREFETCH |
||||
bool "Enable GPMC Prefetch" |
||||
depends on NAND_OMAP_GPMC |
||||
default y |
||||
help |
||||
On OMAP platforms that use the GPMC controller |
||||
(CONFIG_NAND_OMAP_GPMC_PREFETCH), this options enables the code that |
||||
uses the prefetch mode to speed up read operations. |
||||
|
||||
config NAND_OMAP_ELM |
||||
bool "Enable ELM driver for OMAPxx and AMxx platforms." |
||||
depends on NAND_OMAP_GPMC && !OMAP34XX |
||||
help |
||||
ELM controller is used for ECC error detection (not ECC calculation) |
||||
of BCH4, BCH8 and BCH16 ECC algorithms. |
||||
Some legacy platforms like OMAP3xx do not have in-built ELM h/w engine, |
||||
thus such SoC platforms need to depend on software library for ECC error |
||||
detection. However ECC calculation on such plaforms would still be |
||||
done by GPMC controller. |
||||
|
||||
config NAND_VF610_NFC |
||||
bool "Support for Freescale NFC for VF610" |
||||
select SYS_NAND_SELF_INIT |
||||
imply CMD_NAND |
||||
help |
||||
Enables support for NAND Flash Controller on some Freescale |
||||
processors like the VF610, MCF54418 or Kinetis K70. |
||||
The driver supports a maximum 2k page size. The driver |
||||
currently does not support hardware ECC. |
||||
|
||||
choice |
||||
prompt "Hardware ECC strength" |
||||
depends on NAND_VF610_NFC |
||||
default SYS_NAND_VF610_NFC_45_ECC_BYTES |
||||
help |
||||
Select the ECC strength used in the hardware BCH ECC block. |
||||
|
||||
config SYS_NAND_VF610_NFC_45_ECC_BYTES |
||||
bool "24-error correction (45 ECC bytes)" |
||||
|
||||
config SYS_NAND_VF610_NFC_60_ECC_BYTES |
||||
bool "32-error correction (60 ECC bytes)" |
||||
|
||||
endchoice |
||||
|
||||
config NAND_PXA3XX |
||||
bool "Support for NAND on PXA3xx and Armada 370/XP/38x" |
||||
select SYS_NAND_SELF_INIT |
||||
imply CMD_NAND |
||||
help |
||||
This enables the driver for the NAND flash device found on |
||||
PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2). |
||||
|
||||
config NAND_SUNXI |
||||
bool "Support for NAND on Allwinner SoCs" |
||||
default ARCH_SUNXI |
||||
depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I || MACH_SUN8I |
||||
select SYS_NAND_SELF_INIT |
||||
select SYS_NAND_U_BOOT_LOCATIONS |
||||
select SPL_NAND_SUPPORT |
||||
imply CMD_NAND |
||||
---help--- |
||||
Enable support for NAND. This option enables the standard and |
||||
SPL drivers. |
||||
The SPL driver only supports reading from the NAND using DMA |
||||
transfers. |
||||
|
||||
if NAND_SUNXI |
||||
|
||||
config NAND_SUNXI_SPL_ECC_STRENGTH |
||||
int "Allwinner NAND SPL ECC Strength" |
||||
default 64 |
||||
|
||||
config NAND_SUNXI_SPL_ECC_SIZE |
||||
int "Allwinner NAND SPL ECC Step Size" |
||||
default 1024 |
||||
|
||||
config NAND_SUNXI_SPL_USABLE_PAGE_SIZE |
||||
int "Allwinner NAND SPL Usable Page Size" |
||||
default 1024 |
||||
|
||||
endif |
||||
|
||||
config NAND_ARASAN |
||||
bool "Configure Arasan Nand" |
||||
select SYS_NAND_SELF_INIT |
||||
imply CMD_NAND |
||||
help |
||||
This enables Nand driver support for Arasan nand flash |
||||
controller. This uses the hardware ECC for read and |
||||
write operations. |
||||
|
||||
config NAND_MXC |
||||
bool "MXC NAND support" |
||||
depends on CPU_ARM926EJS || CPU_ARM1136 || MX5 |
||||
imply CMD_NAND |
||||
help |
||||
This enables the NAND driver for the NAND flash controller on the |
||||
i.MX27 / i.MX31 / i.MX5 rocessors. |
||||
|
||||
config NAND_MXS |
||||
bool "MXS NAND support" |
||||
depends on MX23 || MX28 || MX6 || MX7 |
||||
select SYS_NAND_SELF_INIT |
||||
imply CMD_NAND |
||||
select APBH_DMA |
||||
select APBH_DMA_BURST if ARCH_MX6 || ARCH_MX7 |
||||
select APBH_DMA_BURST8 if ARCH_MX6 || ARCH_MX7 |
||||
help |
||||
This enables NAND driver for the NAND flash controller on the |
||||
MXS processors. |
||||
|
||||
if NAND_MXS |
||||
|
||||
config NAND_MXS_DT |
||||
bool "Support MXS NAND controller as a DT device" |
||||
depends on OF_CONTROL && MTD |
||||
help |
||||
Enable the driver for MXS NAND flash on platforms using |
||||
device tree. |
||||
|
||||
config NAND_MXS_USE_MINIMUM_ECC |
||||
bool "Use minimum ECC strength supported by the controller" |
||||
default false |
||||
|
||||
endif |
||||
|
||||
config NAND_ZYNQ |
||||
bool "Support for Zynq Nand controller" |
||||
select SYS_NAND_SELF_INIT |
||||
imply CMD_NAND |
||||
help |
||||
This enables Nand driver support for Nand flash controller |
||||
found on Zynq SoC. |
||||
|
||||
config NAND_ZYNQ_USE_BOOTLOADER1_TIMINGS |
||||
bool "Enable use of 1st stage bootloader timing for NAND" |
||||
depends on NAND_ZYNQ |
||||
help |
||||
This flag prevent U-boot reconfigure NAND flash controller and reuse |
||||
the NAND timing from 1st stage bootloader. |
||||
|
||||
comment "Generic NAND options" |
||||
|
||||
config SYS_NAND_BLOCK_SIZE |
||||
hex "NAND chip eraseblock size" |
||||
depends on ARCH_SUNXI |
||||
help |
||||
Number of data bytes in one eraseblock for the NAND chip on the |
||||
board. This is the multiple of NAND_PAGE_SIZE and the number of |
||||
pages. |
||||
|
||||
config SYS_NAND_PAGE_SIZE |
||||
hex "NAND chip page size" |
||||
depends on ARCH_SUNXI |
||||
help |
||||
Number of data bytes in one page for the NAND chip on the |
||||
board, not including the OOB area. |
||||
|
||||
config SYS_NAND_OOBSIZE |
||||
hex "NAND chip OOB size" |
||||
depends on ARCH_SUNXI |
||||
help |
||||
Number of bytes in the Out-Of-Band area for the NAND chip on |
||||
the board. |
||||
|
||||
# Enhance depends when converting drivers to Kconfig which use this config |
||||
# option (mxc_nand, ndfc, omap_gpmc). |
||||
config SYS_NAND_BUSWIDTH_16BIT |
||||
bool "Use 16-bit NAND interface" |
||||
depends on NAND_VF610_NFC || NAND_OMAP_GPMC || NAND_MXC || ARCH_DAVINCI |
||||
help |
||||
Indicates that NAND device has 16-bit wide data-bus. In absence of this |
||||
config, bus-width of NAND device is assumed to be either 8-bit and later |
||||
determined by reading ONFI params. |
||||
Above config is useful when NAND device's bus-width information cannot |
||||
be determined from on-chip ONFI params, like in following scenarios: |
||||
- SPL boot does not support reading of ONFI parameters. This is done to |
||||
keep SPL code foot-print small. |
||||
- In current U-Boot flow using nand_init(), driver initialization |
||||
happens in board_nand_init() which is called before any device probe |
||||
(nand_scan_ident + nand_scan_tail), thus device's ONFI parameters are |
||||
not available while configuring controller. So a static CONFIG_NAND_xx |
||||
is needed to know the device's bus-width in advance. |
||||
|
||||
if SPL |
||||
|
||||
config SYS_NAND_U_BOOT_LOCATIONS |
||||
bool "Define U-boot binaries locations in NAND" |
||||
help |
||||
Enable CONFIG_SYS_NAND_U_BOOT_OFFS though Kconfig. |
||||
This option should not be enabled when compiling U-boot for boards |
||||
defining CONFIG_SYS_NAND_U_BOOT_OFFS in their include/configs/<board>.h |
||||
file. |
||||
|
||||
config SYS_NAND_U_BOOT_OFFS |
||||
hex "Location in NAND to read U-Boot from" |
||||
default 0x800000 if NAND_SUNXI |
||||
depends on SYS_NAND_U_BOOT_LOCATIONS |
||||
help |
||||
Set the offset from the start of the nand where u-boot should be |
||||
loaded from. |
||||
|
||||
config SYS_NAND_U_BOOT_OFFS_REDUND |
||||
hex "Location in NAND to read U-Boot from" |
||||
default SYS_NAND_U_BOOT_OFFS |
||||
depends on SYS_NAND_U_BOOT_LOCATIONS |
||||
help |
||||
Set the offset from the start of the nand where the redundant u-boot |
||||
should be loaded from. |
||||
|
||||
config SPL_NAND_AM33XX_BCH |
||||
bool "Enables SPL-NAND driver which supports ELM based" |
||||
depends on NAND_OMAP_GPMC && !OMAP34XX |
||||
default y |
||||
help |
||||
Hardware ECC correction. This is useful for platforms which have ELM |
||||
hardware engine and use NAND boot mode. |
||||
Some legacy platforms like OMAP3xx do not have in-built ELM h/w engine, |
||||
so those platforms should use CONFIG_SPL_NAND_SIMPLE for enabling |
||||
SPL-NAND driver with software ECC correction support. |
||||
|
||||
config SPL_NAND_DENALI |
||||
bool "Support Denali NAND controller for SPL" |
||||
help |
||||
This is a small implementation of the Denali NAND controller |
||||
for use on SPL. |
||||
|
||||
config SPL_NAND_SIMPLE |
||||
bool "Use simple SPL NAND driver" |
||||
depends on !SPL_NAND_AM33XX_BCH |
||||
help |
||||
Support for NAND boot using simple NAND drivers that |
||||
expose the cmd_ctrl() interface. |
||||
endif |
||||
|
||||
endif # if NAND |
@ -0,0 +1,77 @@ |
||||
# SPDX-License-Identifier: GPL-2.0+
|
||||
#
|
||||
# (C) Copyright 2006
|
||||
# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
|
||||
|
||||
ifdef CONFIG_SPL_BUILD |
||||
|
||||
ifdef CONFIG_SPL_NAND_DRIVERS |
||||
NORMAL_DRIVERS=y
|
||||
endif |
||||
|
||||
obj-$(CONFIG_SPL_NAND_AM33XX_BCH) += am335x_spl_bch.o
|
||||
obj-$(CONFIG_SPL_NAND_DENALI) += denali_spl.o
|
||||
obj-$(CONFIG_SPL_NAND_SIMPLE) += nand_spl_simple.o
|
||||
obj-$(CONFIG_SPL_NAND_LOAD) += nand_spl_load.o
|
||||
obj-$(CONFIG_SPL_NAND_ECC) += nand_ecc.o
|
||||
obj-$(CONFIG_SPL_NAND_BASE) += nand_base.o
|
||||
obj-$(CONFIG_SPL_NAND_IDENT) += nand_ids.o nand_timings.o
|
||||
obj-$(CONFIG_SPL_NAND_INIT) += nand.o
|
||||
ifeq ($(CONFIG_SPL_ENV_SUPPORT),y) |
||||
obj-$(CONFIG_ENV_IS_IN_NAND) += nand_util.o
|
||||
endif |
||||
|
||||
else # not spl
|
||||
|
||||
NORMAL_DRIVERS=y
|
||||
|
||||
obj-y += nand.o
|
||||
obj-y += nand_bbt.o
|
||||
obj-y += nand_ids.o
|
||||
obj-y += nand_util.o
|
||||
obj-y += nand_ecc.o
|
||||
obj-y += nand_base.o
|
||||
obj-y += nand_timings.o
|
||||
|
||||
endif # not spl
|
||||
|
||||
ifdef NORMAL_DRIVERS |
||||
|
||||
obj-$(CONFIG_NAND_ECC_BCH) += nand_bch.o
|
||||
|
||||
obj-$(CONFIG_NAND_ATMEL) += atmel_nand.o
|
||||
obj-$(CONFIG_NAND_ARASAN) += arasan_nfc.o
|
||||
obj-$(CONFIG_NAND_DAVINCI) += davinci_nand.o
|
||||
obj-$(CONFIG_NAND_DENALI) += denali.o
|
||||
obj-$(CONFIG_NAND_DENALI_DT) += denali_dt.o
|
||||
obj-$(CONFIG_NAND_FSL_ELBC) += fsl_elbc_nand.o
|
||||
obj-$(CONFIG_NAND_FSL_IFC) += fsl_ifc_nand.o
|
||||
obj-$(CONFIG_NAND_FSL_UPM) += fsl_upm.o
|
||||
obj-$(CONFIG_NAND_FSMC) += fsmc_nand.o
|
||||
obj-$(CONFIG_NAND_KB9202) += kb9202_nand.o
|
||||
obj-$(CONFIG_NAND_KIRKWOOD) += kirkwood_nand.o
|
||||
obj-$(CONFIG_NAND_KMETER1) += kmeter1_nand.o
|
||||
obj-$(CONFIG_NAND_LPC32XX_MLC) += lpc32xx_nand_mlc.o
|
||||
obj-$(CONFIG_NAND_LPC32XX_SLC) += lpc32xx_nand_slc.o
|
||||
obj-$(CONFIG_NAND_VF610_NFC) += vf610_nfc.o
|
||||
obj-$(CONFIG_NAND_MXC) += mxc_nand.o
|
||||
obj-$(CONFIG_NAND_MXS) += mxs_nand.o
|
||||
obj-$(CONFIG_NAND_MXS_DT) += mxs_nand_dt.o
|
||||
obj-$(CONFIG_NAND_PXA3XX) += pxa3xx_nand.o
|
||||
obj-$(CONFIG_NAND_SPEAR) += spr_nand.o
|
||||
obj-$(CONFIG_TEGRA_NAND) += tegra_nand.o
|
||||
obj-$(CONFIG_NAND_OMAP_GPMC) += omap_gpmc.o
|
||||
obj-$(CONFIG_NAND_OMAP_ELM) += omap_elm.o
|
||||
obj-$(CONFIG_NAND_PLAT) += nand_plat.o
|
||||
obj-$(CONFIG_NAND_SUNXI) += sunxi_nand.o
|
||||
obj-$(CONFIG_NAND_ZYNQ) += zynq_nand.o
|
||||
|
||||
else # minimal SPL drivers
|
||||
|
||||
obj-$(CONFIG_NAND_FSL_ELBC) += fsl_elbc_spl.o
|
||||
obj-$(CONFIG_NAND_FSL_IFC) += fsl_ifc_spl.o
|
||||
obj-$(CONFIG_NAND_MXC) += mxc_nand_spl.o
|
||||
obj-$(CONFIG_NAND_MXS) += mxs_nand_spl.o mxs_nand.o
|
||||
obj-$(CONFIG_NAND_SUNXI) += sunxi_nand_spl.o
|
||||
|
||||
endif # drivers
|
@ -1,6 +1,6 @@ |
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* drivers/mtd/nand/nand_util.c |
||||
* drivers/mtd/nand/raw/nand_util.c |
||||
* |
||||
* Copyright (C) 2006 by Weiss-Electronic GmbH. |
||||
* All rights reserved. |
@ -1,6 +1,6 @@ |
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* drivers/mtd/nand/pxa3xx_nand.c |
||||
* drivers/mtd/nand/raw/pxa3xx_nand.c |
||||
* |
||||
* Copyright © 2005 Intel Corporation |
||||
* Copyright © 2006 Marvell International Ltd. |
@ -0,0 +1,7 @@ |
||||
menuconfig MTD_SPI_NAND |
||||
bool "SPI NAND device Support" |
||||
depends on MTD && DM_SPI |
||||
select MTD_NAND_CORE |
||||
select SPI_MEM |
||||
help |
||||
This is the framework for the SPI NAND device drivers. |
@ -0,0 +1,4 @@ |
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
spinand-objs := core.o macronix.o micron.o winbond.o
|
||||
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,146 @@ |
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2018 Macronix |
||||
* |
||||
* Author: Boris Brezillon <boris.brezillon@bootlin.com> |
||||
*/ |
||||
|
||||
#ifndef __UBOOT__ |
||||
#include <linux/device.h> |
||||
#include <linux/kernel.h> |
||||
#endif |
||||
#include <linux/mtd/spinand.h> |
||||
|
||||
#define SPINAND_MFR_MACRONIX 0xC2 |
||||
|
||||
static SPINAND_OP_VARIANTS(read_cache_variants, |
||||
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), |
||||
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), |
||||
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), |
||||
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); |
||||
|
||||
static SPINAND_OP_VARIANTS(write_cache_variants, |
||||
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), |
||||
SPINAND_PROG_LOAD(true, 0, NULL, 0)); |
||||
|
||||
static SPINAND_OP_VARIANTS(update_cache_variants, |
||||
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), |
||||
SPINAND_PROG_LOAD(false, 0, NULL, 0)); |
||||
|
||||
static int mx35lfxge4ab_ooblayout_ecc(struct mtd_info *mtd, int section, |
||||
struct mtd_oob_region *region) |
||||
{ |
||||
return -ERANGE; |
||||
} |
||||
|
||||
static int mx35lfxge4ab_ooblayout_free(struct mtd_info *mtd, int section, |
||||
struct mtd_oob_region *region) |
||||
{ |
||||
if (section) |
||||
return -ERANGE; |
||||
|
||||
region->offset = 2; |
||||
region->length = mtd->oobsize - 2; |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
static const struct mtd_ooblayout_ops mx35lfxge4ab_ooblayout = { |
||||
.ecc = mx35lfxge4ab_ooblayout_ecc, |
||||
.free = mx35lfxge4ab_ooblayout_free, |
||||
}; |
||||
|
||||
static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr) |
||||
{ |
||||
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1), |
||||
SPI_MEM_OP_NO_ADDR, |
||||
SPI_MEM_OP_DUMMY(1, 1), |
||||
SPI_MEM_OP_DATA_IN(1, eccsr, 1)); |
||||
|
||||
return spi_mem_exec_op(spinand->slave, &op); |
||||
} |
||||
|
||||
static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand, |
||||
u8 status) |
||||
{ |
||||
struct nand_device *nand = spinand_to_nand(spinand); |
||||
u8 eccsr; |
||||
|
||||
switch (status & STATUS_ECC_MASK) { |
||||
case STATUS_ECC_NO_BITFLIPS: |
||||
return 0; |
||||
|
||||
case STATUS_ECC_UNCOR_ERROR: |
||||
return -EBADMSG; |
||||
|
||||
case STATUS_ECC_HAS_BITFLIPS: |
||||
/*
|
||||
* Let's try to retrieve the real maximum number of bitflips |
||||
* in order to avoid forcing the wear-leveling layer to move |
||||
* data around if it's not necessary. |
||||
*/ |
||||
if (mx35lf1ge4ab_get_eccsr(spinand, &eccsr)) |
||||
return nand->eccreq.strength; |
||||
|
||||
if (WARN_ON(eccsr > nand->eccreq.strength || !eccsr)) |
||||
return nand->eccreq.strength; |
||||
|
||||
return eccsr; |
||||
|
||||
default: |
||||
break; |
||||
} |
||||
|
||||
return -EINVAL; |
||||
} |
||||
|
||||
static const struct spinand_info macronix_spinand_table[] = { |
||||
SPINAND_INFO("MX35LF1GE4AB", 0x12, |
||||
NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1), |
||||
NAND_ECCREQ(4, 512), |
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants, |
||||
&write_cache_variants, |
||||
&update_cache_variants), |
||||
SPINAND_HAS_QE_BIT, |
||||
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, |
||||
mx35lf1ge4ab_ecc_get_status)), |
||||
SPINAND_INFO("MX35LF2GE4AB", 0x22, |
||||
NAND_MEMORG(1, 2048, 64, 64, 2048, 2, 1, 1), |
||||
NAND_ECCREQ(4, 512), |
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants, |
||||
&write_cache_variants, |
||||
&update_cache_variants), |
||||
SPINAND_HAS_QE_BIT, |
||||
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), |
||||
}; |
||||
|
||||
static int macronix_spinand_detect(struct spinand_device *spinand) |
||||
{ |
||||
u8 *id = spinand->id.data; |
||||
int ret; |
||||
|
||||
/*
|
||||
* Macronix SPI NAND read ID needs a dummy byte, so the first byte in |
||||
* raw_id is garbage. |
||||
*/ |
||||
if (id[1] != SPINAND_MFR_MACRONIX) |
||||
return 0; |
||||
|
||||
ret = spinand_match_and_init(spinand, macronix_spinand_table, |
||||
ARRAY_SIZE(macronix_spinand_table), |
||||
id[2]); |
||||
if (ret) |
||||
return ret; |
||||
|
||||
return 1; |
||||
} |
||||
|
||||
static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = { |
||||
.detect = macronix_spinand_detect, |
||||
}; |
||||
|
||||
const struct spinand_manufacturer macronix_spinand_manufacturer = { |
||||
.id = SPINAND_MFR_MACRONIX, |
||||
.name = "Macronix", |
||||
.ops = ¯onix_spinand_manuf_ops, |
||||
}; |
@ -0,0 +1,135 @@ |
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2016-2017 Micron Technology, Inc. |
||||
* |
||||
* Authors: |
||||
* Peter Pan <peterpandong@micron.com> |
||||
*/ |
||||
|
||||
#ifndef __UBOOT__ |
||||
#include <linux/device.h> |
||||
#include <linux/kernel.h> |
||||
#endif |
||||
#include <linux/mtd/spinand.h> |
||||
|
||||
#define SPINAND_MFR_MICRON 0x2c |
||||
|
||||
#define MICRON_STATUS_ECC_MASK GENMASK(7, 4) |
||||
#define MICRON_STATUS_ECC_NO_BITFLIPS (0 << 4) |
||||
#define MICRON_STATUS_ECC_1TO3_BITFLIPS (1 << 4) |
||||
#define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4) |
||||
#define MICRON_STATUS_ECC_7TO8_BITFLIPS (5 << 4) |
||||
|
||||
static SPINAND_OP_VARIANTS(read_cache_variants, |
||||
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), |
||||
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), |
||||
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), |
||||
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), |
||||
SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), |
||||
SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); |
||||
|
||||
static SPINAND_OP_VARIANTS(write_cache_variants, |
||||
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), |
||||
SPINAND_PROG_LOAD(true, 0, NULL, 0)); |
||||
|
||||
static SPINAND_OP_VARIANTS(update_cache_variants, |
||||
SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), |
||||
SPINAND_PROG_LOAD(false, 0, NULL, 0)); |
||||
|
||||
static int mt29f2g01abagd_ooblayout_ecc(struct mtd_info *mtd, int section, |
||||
struct mtd_oob_region *region) |
||||
{ |
||||
if (section) |
||||
return -ERANGE; |
||||
|
||||
region->offset = 64; |
||||
region->length = 64; |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
static int mt29f2g01abagd_ooblayout_free(struct mtd_info *mtd, int section, |
||||
struct mtd_oob_region *region) |
||||
{ |
||||
if (section) |
||||
return -ERANGE; |
||||
|
||||
/* Reserve 2 bytes for the BBM. */ |
||||
region->offset = 2; |
||||
region->length = 62; |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
static const struct mtd_ooblayout_ops mt29f2g01abagd_ooblayout = { |
||||
.ecc = mt29f2g01abagd_ooblayout_ecc, |
||||
.free = mt29f2g01abagd_ooblayout_free, |
||||
}; |
||||
|
||||
static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand, |
||||
u8 status) |
||||
{ |
||||
switch (status & MICRON_STATUS_ECC_MASK) { |
||||
case STATUS_ECC_NO_BITFLIPS: |
||||
return 0; |
||||
|
||||
case STATUS_ECC_UNCOR_ERROR: |
||||
return -EBADMSG; |
||||
|
||||
case MICRON_STATUS_ECC_1TO3_BITFLIPS: |
||||
return 3; |
||||
|
||||
case MICRON_STATUS_ECC_4TO6_BITFLIPS: |
||||
return 6; |
||||
|
||||
case MICRON_STATUS_ECC_7TO8_BITFLIPS: |
||||
return 8; |
||||
|
||||
default: |
||||
break; |
||||
} |
||||
|
||||
return -EINVAL; |
||||
} |
||||
|
||||
static const struct spinand_info micron_spinand_table[] = { |
||||
SPINAND_INFO("MT29F2G01ABAGD", 0x24, |
||||
NAND_MEMORG(1, 2048, 128, 64, 2048, 2, 1, 1), |
||||
NAND_ECCREQ(8, 512), |
||||
SPINAND_INFO_OP_VARIANTS(&read_cache_variants, |
||||
&write_cache_variants, |
||||
&update_cache_variants), |
||||
0, |
||||
SPINAND_ECCINFO(&mt29f2g01abagd_ooblayout, |
||||
mt29f2g01abagd_ecc_get_status)), |
||||
}; |
||||
|
||||
static int micron_spinand_detect(struct spinand_device *spinand) |
||||
{ |
||||
u8 *id = spinand->id.data; |
||||
int ret; |
||||
|
||||
/*
|
||||
* Micron SPI NAND read ID need a dummy byte, |
||||
* so the first byte in raw_id is dummy. |
||||
*/ |
||||
if (id[1] != SPINAND_MFR_MICRON) |
||||
return 0; |
||||
|
||||
ret = spinand_match_and_init(spinand, micron_spinand_table, |
||||
ARRAY_SIZE(micron_spinand_table), id[2]); |
||||
if (ret) |
||||
return ret; |
||||
|
||||
return 1; |
||||
} |
||||
|
||||
static const struct spinand_manufacturer_ops micron_spinand_manuf_ops = { |
||||
.detect = micron_spinand_detect, |
||||
}; |
||||
|
||||
const struct spinand_manufacturer micron_spinand_manufacturer = { |
||||
.id = SPINAND_MFR_MICRON, |
||||
.name = "Micron", |
||||
.ops = µn_spinand_manuf_ops, |
||||
}; |
@ -0,0 +1,143 @@ |
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (c) 2017 exceet electronics GmbH |
||||
* |
||||
* Authors: |
||||
* Frieder Schrempf <frieder.schrempf@exceet.de> |
||||
* Boris Brezillon <boris.brezillon@bootlin.com> |
||||
*/ |
||||
|
||||
#ifndef __UBOOT__ |
||||
#include <linux/device.h> |
||||
#include <linux/kernel.h> |
||||
#endif |
||||
#include <linux/mtd/spinand.h> |
||||
|
||||
#define SPINAND_MFR_WINBOND 0xEF |
||||
|
||||
#define WINBOND_CFG_BUF_READ BIT(3) |
||||
|
||||
/*
 * Candidate read-from-cache operations; presumably listed from widest to
 * narrowest I/O so the fastest variant the controller supports is picked
 * first — TODO confirm against SPINAND_OP_VARIANTS selection logic.
 */
static SPINAND_OP_VARIANTS(read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));

/* Candidate program-load (write cache) operations. */
static SPINAND_OP_VARIANTS(write_cache_variants,
		SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
		SPINAND_PROG_LOAD(true, 0, NULL, 0));

/* Candidate program-load variants used for partial page updates. */
static SPINAND_OP_VARIANTS(update_cache_variants,
		SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
		SPINAND_PROG_LOAD(false, 0, NULL, 0));
||||
|
||||
static int w25m02gv_ooblayout_ecc(struct mtd_info *mtd, int section, |
||||
struct mtd_oob_region *region) |
||||
{ |
||||
if (section > 3) |
||||
return -ERANGE; |
||||
|
||||
region->offset = (16 * section) + 8; |
||||
region->length = 8; |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
static int w25m02gv_ooblayout_free(struct mtd_info *mtd, int section, |
||||
struct mtd_oob_region *region) |
||||
{ |
||||
if (section > 3) |
||||
return -ERANGE; |
||||
|
||||
region->offset = (16 * section) + 2; |
||||
region->length = 6; |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
/* OOB layout callbacks for the W25M02GV. */
static const struct mtd_ooblayout_ops w25m02gv_ooblayout = {
	.ecc = w25m02gv_ooblayout_ecc,
	.free = w25m02gv_ooblayout_free,
};
||||
|
||||
/*
 * Select one of the stacked dies of the W25M02GV.
 *
 * Issues the die-select command (opcode 0xc2): one opcode byte followed by
 * a single data byte carrying the die index, no address or dummy cycles.
 * The payload goes through the driver-owned scratch buffer.
 */
static int w25m02gv_select_target(struct spinand_device *spinand,
				  unsigned int target)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0xc2, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_OUT(1,
							spinand->scratchbuf,
							1));

	*spinand->scratchbuf = target;
	return spi_mem_exec_op(spinand->slave, &op);
}
||||
|
||||
/*
 * Supported Winbond SPI NANDs, matched by device ID (second raw ID byte).
 * W25M02GV: two stacked 1Gb dies (ntargets = 2), 2048+64 byte pages,
 * 64 pages per block, 1024 blocks per LUN, on-die 1-bit/512-byte ECC.
 */
static const struct spinand_info winbond_spinand_table[] = {
	SPINAND_INFO("W25M02GV", 0xAB,
		     NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 2),
		     NAND_ECCREQ(1, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     0,
		     SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
		     SPINAND_SELECT_TARGET(w25m02gv_select_target)),
};
||||
|
||||
/**
|
||||
* winbond_spinand_detect - initialize device related part in spinand_device |
||||
* struct if it is a Winbond device. |
||||
* @spinand: SPI NAND device structure |
||||
*/ |
||||
static int winbond_spinand_detect(struct spinand_device *spinand) |
||||
{ |
||||
u8 *id = spinand->id.data; |
||||
int ret; |
||||
|
||||
/*
|
||||
* Winbond SPI NAND read ID need a dummy byte, |
||||
* so the first byte in raw_id is dummy. |
||||
*/ |
||||
if (id[1] != SPINAND_MFR_WINBOND) |
||||
return 0; |
||||
|
||||
ret = spinand_match_and_init(spinand, winbond_spinand_table, |
||||
ARRAY_SIZE(winbond_spinand_table), id[2]); |
||||
if (ret) |
||||
return ret; |
||||
|
||||
return 1; |
||||
} |
||||
|
||||
static int winbond_spinand_init(struct spinand_device *spinand) |
||||
{ |
||||
struct nand_device *nand = spinand_to_nand(spinand); |
||||
unsigned int i; |
||||
|
||||
/*
|
||||
* Make sure all dies are in buffer read mode and not continuous read |
||||
* mode. |
||||
*/ |
||||
for (i = 0; i < nand->memorg.ntargets; i++) { |
||||
spinand_select_target(spinand, i); |
||||
spinand_upd_cfg(spinand, WINBOND_CFG_BUF_READ, |
||||
WINBOND_CFG_BUF_READ); |
||||
} |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
/* Manufacturer hooks: detection plus per-device init (buffer read mode). */
static const struct spinand_manufacturer_ops winbond_spinand_manuf_ops = {
	.detect = winbond_spinand_detect,
	.init = winbond_spinand_init,
};
||||
|
||||
/* Entry exported to the SPI NAND core's manufacturer list. */
const struct spinand_manufacturer winbond_spinand_manufacturer = {
	.id = SPINAND_MFR_WINBOND,
	.name = "Winbond",
	.ops = &winbond_spinand_manuf_ops,
};
@ -0,0 +1,501 @@ |
||||
// SPDX-License-Identifier: GPL-2.0+
|
||||
/*
|
||||
* Copyright (C) 2018 Exceet Electronics GmbH |
||||
* Copyright (C) 2018 Bootlin |
||||
* |
||||
* Author: Boris Brezillon <boris.brezillon@bootlin.com> |
||||
*/ |
||||
|
||||
#ifndef __UBOOT__ |
||||
#include <linux/dmaengine.h> |
||||
#include <linux/pm_runtime.h> |
||||
#include "internals.h" |
||||
#else |
||||
#include <spi.h> |
||||
#include <spi-mem.h> |
||||
#endif |
||||
|
||||
#ifndef __UBOOT__ |
||||
/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from spi drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	/* Prefer the DMA channel matching the transfer direction, fall back
	 * to the controller's parent device otherwise.
	 */
	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);
||||
|
||||
/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	/* Mirror the device selection done at map time. */
	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
||||
#endif /* __UBOOT__ */ |
||||
|
||||
/*
 * Check that @slave's bus mode allows a transfer phase using @buswidth I/O
 * lines in the direction given by @tx (true = output, false = input).
 *
 * Returns 0 when the width is usable, -ENOTSUPP otherwise. A quad-capable
 * link is considered dual-capable too.
 */
static int spi_check_buswidth_req(struct spi_slave *slave, u8 buswidth, bool tx)
{
	u32 mode = slave->mode;
	u32 mask;

	if (buswidth == 1)
		return 0;

	if (buswidth == 2)
		mask = tx ? (SPI_TX_DUAL | SPI_TX_QUAD) :
			    (SPI_RX_DUAL | SPI_RX_QUAD);
	else if (buswidth == 4)
		mask = tx ? SPI_TX_QUAD : SPI_RX_QUAD;
	else
		return -ENOTSUPP;

	return (mode & mask) ? 0 : -ENOTSUPP;
}
||||
|
||||
bool spi_mem_default_supports_op(struct spi_slave *slave, |
||||
const struct spi_mem_op *op) |
||||
{ |
||||
if (spi_check_buswidth_req(slave, op->cmd.buswidth, true)) |
||||
return false; |
||||
|
||||
if (op->addr.nbytes && |
||||
spi_check_buswidth_req(slave, op->addr.buswidth, true)) |
||||
return false; |
||||
|
||||
if (op->dummy.nbytes && |
||||
spi_check_buswidth_req(slave, op->dummy.buswidth, true)) |
||||
return false; |
||||
|
||||
if (op->data.nbytes && |
||||
spi_check_buswidth_req(slave, op->data.buswidth, |
||||
op->data.dir == SPI_MEM_DATA_OUT)) |
||||
return false; |
||||
|
||||
return true; |
||||
} |
||||
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op); |
||||
|
||||
/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @slave: the SPI device
 * @op: the memory operation to check
 *
 * Some controllers are only supporting Single or Dual IOs, others might only
 * support specific opcodes, or it can even be that the controller and device
 * both support Quad IOs but the hardware prevents you from using it because
 * only 2 IO lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_slave *slave,
			 const struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);

	/* Let the controller override the generic buswidth check. */
	if (ops->mem_ops && ops->mem_ops->supports_op)
		return ops->mem_ops->supports_op(slave, op);

	return spi_mem_default_supports_op(slave, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
||||
|
||||
/**
|
||||
* spi_mem_exec_op() - Execute a memory operation |
||||
* @slave: the SPI device |
||||
* @op: the memory operation to execute |
||||
* |
||||
* Executes a memory operation. |
||||
* |
||||
* This function first checks that @op is supported and then tries to execute |
||||
* it. |
||||
* |
||||
* Return: 0 in case of success, a negative error code otherwise. |
||||
*/ |
||||
int spi_mem_exec_op(struct spi_slave *slave, const struct spi_mem_op *op) |
||||
{ |
||||
struct udevice *bus = slave->dev->parent; |
||||
struct dm_spi_ops *ops = spi_get_ops(bus); |
||||
unsigned int pos = 0; |
||||
const u8 *tx_buf = NULL; |
||||
u8 *rx_buf = NULL; |
||||
u8 *op_buf; |
||||
int op_len; |
||||
u32 flag; |
||||
int ret; |
||||
int i; |
||||
|
||||
if (!spi_mem_supports_op(slave, op)) |
||||
return -ENOTSUPP; |
||||
|
||||
if (ops->mem_ops) { |
||||
#ifndef __UBOOT__ |
||||
/*
|
||||
* Flush the message queue before executing our SPI memory |
||||
* operation to prevent preemption of regular SPI transfers. |
||||
*/ |
||||
spi_flush_queue(ctlr); |
||||
|
||||
if (ctlr->auto_runtime_pm) { |
||||
ret = pm_runtime_get_sync(ctlr->dev.parent); |
||||
if (ret < 0) { |
||||
dev_err(&ctlr->dev, |
||||
"Failed to power device: %d\n", |
||||
ret); |
||||
return ret; |
||||
} |
||||
} |
||||
|
||||
mutex_lock(&ctlr->bus_lock_mutex); |
||||
mutex_lock(&ctlr->io_mutex); |
||||
#endif |
||||
ret = ops->mem_ops->exec_op(slave, op); |
||||
#ifndef __UBOOT__ |
||||
mutex_unlock(&ctlr->io_mutex); |
||||
mutex_unlock(&ctlr->bus_lock_mutex); |
||||
|
||||
if (ctlr->auto_runtime_pm) |
||||
pm_runtime_put(ctlr->dev.parent); |
||||
#endif |
||||
|
||||
/*
|
||||
* Some controllers only optimize specific paths (typically the |
||||
* read path) and expect the core to use the regular SPI |
||||
* interface in other cases. |
||||
*/ |
||||
if (!ret || ret != -ENOTSUPP) |
||||
return ret; |
||||
} |
||||
|
||||
#ifndef __UBOOT__ |
||||
tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes + |
||||
op->dummy.nbytes; |
||||
|
||||
/*
|
||||
* Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so |
||||
* we're guaranteed that this buffer is DMA-able, as required by the |
||||
* SPI layer. |
||||
*/ |
||||
tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA); |
||||
if (!tmpbuf) |
||||
return -ENOMEM; |
||||
|
||||
spi_message_init(&msg); |
||||
|
||||
tmpbuf[0] = op->cmd.opcode; |
||||
xfers[xferpos].tx_buf = tmpbuf; |
||||
xfers[xferpos].len = sizeof(op->cmd.opcode); |
||||
xfers[xferpos].tx_nbits = op->cmd.buswidth; |
||||
spi_message_add_tail(&xfers[xferpos], &msg); |
||||
xferpos++; |
||||
totalxferlen++; |
||||
|
||||
if (op->addr.nbytes) { |
||||
int i; |
||||
|
||||
for (i = 0; i < op->addr.nbytes; i++) |
||||
tmpbuf[i + 1] = op->addr.val >> |
||||
(8 * (op->addr.nbytes - i - 1)); |
||||
|
||||
xfers[xferpos].tx_buf = tmpbuf + 1; |
||||
xfers[xferpos].len = op->addr.nbytes; |
||||
xfers[xferpos].tx_nbits = op->addr.buswidth; |
||||
spi_message_add_tail(&xfers[xferpos], &msg); |
||||
xferpos++; |
||||
totalxferlen += op->addr.nbytes; |
||||
} |
||||
|
||||
if (op->dummy.nbytes) { |
||||
memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes); |
||||
xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1; |
||||
xfers[xferpos].len = op->dummy.nbytes; |
||||
xfers[xferpos].tx_nbits = op->dummy.buswidth; |
||||
spi_message_add_tail(&xfers[xferpos], &msg); |
||||
xferpos++; |
||||
totalxferlen += op->dummy.nbytes; |
||||
} |
||||
|
||||
if (op->data.nbytes) { |
||||
if (op->data.dir == SPI_MEM_DATA_IN) { |
||||
xfers[xferpos].rx_buf = op->data.buf.in; |
||||
xfers[xferpos].rx_nbits = op->data.buswidth; |
||||
} else { |
||||
xfers[xferpos].tx_buf = op->data.buf.out; |
||||
xfers[xferpos].tx_nbits = op->data.buswidth; |
||||
} |
||||
|
||||
xfers[xferpos].len = op->data.nbytes; |
||||
spi_message_add_tail(&xfers[xferpos], &msg); |
||||
xferpos++; |
||||
totalxferlen += op->data.nbytes; |
||||
} |
||||
|
||||
ret = spi_sync(slave, &msg); |
||||
|
||||
kfree(tmpbuf); |
||||
|
||||
if (ret) |
||||
return ret; |
||||
|
||||
if (msg.actual_length != totalxferlen) |
||||
return -EIO; |
||||
#else |
||||
|
||||
/* U-Boot does not support parallel SPI data lanes */ |
||||
if ((op->cmd.buswidth != 1) || |
||||
(op->addr.nbytes && op->addr.buswidth != 1) || |
||||
(op->dummy.nbytes && op->dummy.buswidth != 1) || |
||||
(op->data.nbytes && op->data.buswidth != 1)) { |
||||
printf("Dual/Quad raw SPI transfers not supported\n"); |
||||
return -ENOTSUPP; |
||||
} |
||||
|
||||
if (op->data.nbytes) { |
||||
if (op->data.dir == SPI_MEM_DATA_IN) |
||||
rx_buf = op->data.buf.in; |
||||
else |
||||
tx_buf = op->data.buf.out; |
||||
} |
||||
|
||||
op_len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes; |
||||
op_buf = calloc(1, op_len); |
||||
|
||||
ret = spi_claim_bus(slave); |
||||
if (ret < 0) |
||||
return ret; |
||||
|
||||
op_buf[pos++] = op->cmd.opcode; |
||||
|
||||
if (op->addr.nbytes) { |
||||
for (i = 0; i < op->addr.nbytes; i++) |
||||
op_buf[pos + i] = op->addr.val >> |
||||
(8 * (op->addr.nbytes - i - 1)); |
||||
|
||||
pos += op->addr.nbytes; |
||||
} |
||||
|
||||
if (op->dummy.nbytes) |
||||
memset(op_buf + pos, 0xff, op->dummy.nbytes); |
||||
|
||||
/* 1st transfer: opcode + address + dummy cycles */ |
||||
flag = SPI_XFER_BEGIN; |
||||
/* Make sure to set END bit if no tx or rx data messages follow */ |
||||
if (!tx_buf && !rx_buf) |
||||
flag |= SPI_XFER_END; |
||||
|
||||
ret = spi_xfer(slave, op_len * 8, op_buf, NULL, flag); |
||||
if (ret) |
||||
return ret; |
||||
|
||||
/* 2nd transfer: rx or tx data path */ |
||||
if (tx_buf || rx_buf) { |
||||
ret = spi_xfer(slave, op->data.nbytes * 8, tx_buf, |
||||
rx_buf, SPI_XFER_END); |
||||
if (ret) |
||||
return ret; |
||||
} |
||||
|
||||
spi_release_bus(slave); |
||||
|
||||
for (i = 0; i < pos; i++) |
||||
debug("%02x ", op_buf[i]); |
||||
debug("| [%dB %s] ", |
||||
tx_buf || rx_buf ? op->data.nbytes : 0, |
||||
tx_buf || rx_buf ? (tx_buf ? "out" : "in") : "-"); |
||||
for (i = 0; i < op->data.nbytes; i++) |
||||
debug("%02x ", tx_buf ? tx_buf[i] : rx_buf[i]); |
||||
debug("[ret %d]\n", ret); |
||||
|
||||
free(op_buf); |
||||
|
||||
if (ret < 0) |
||||
return ret; |
||||
#endif /* __UBOOT__ */ |
||||
|
||||
return 0; |
||||
} |
||||
EXPORT_SYMBOL_GPL(spi_mem_exec_op); |
||||
|
||||
/**
|
||||
* spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to |
||||
* match controller limitations |
||||
* @slave: the SPI device |
||||
* @op: the operation to adjust |
||||
* |
||||
* Some controllers have FIFO limitations and must split a data transfer |
||||
* operation into multiple ones, others require a specific alignment for |
||||
* optimized accesses. This function allows SPI mem drivers to split a single |
||||
* operation into multiple sub-operations when required. |
||||
* |
||||
* Return: a negative error code if the controller can't properly adjust @op, |
||||
* 0 otherwise. Note that @op->data.nbytes will be updated if @op |
||||
* can't be handled in a single step. |
||||
*/ |
||||
int spi_mem_adjust_op_size(struct spi_slave *slave, struct spi_mem_op *op) |
||||
{ |
||||
struct udevice *bus = slave->dev->parent; |
||||
struct dm_spi_ops *ops = spi_get_ops(bus); |
||||
|
||||
if (ops->mem_ops && ops->mem_ops->adjust_op_size) |
||||
return ops->mem_ops->adjust_op_size(slave, op); |
||||
|
||||
return 0; |
||||
} |
||||
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size); |
||||
|
||||
#ifndef __UBOOT__ |
||||
/* Upcast from the embedded device_driver to the SPI mem driver. */
static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}
||||
|
||||
/*
 * Generic probe wrapper: allocate a struct spi_mem bound to @spi, store it
 * as driver data, then delegate to the spi_mem_driver's own probe().
 */
static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;
	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}
||||
|
||||
/*
 * Generic remove wrapper: delegate to the spi_mem_driver's remove() if one
 * was provided (the hook is optional).
 */
static int spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		return memdrv->remove(mem);

	return 0;
}
||||
|
||||
/*
 * Generic shutdown wrapper: delegate to the spi_mem_driver's shutdown() if
 * one was provided (the hook is optional).
 */
static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}
||||
|
||||
/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */

int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	/* Install the generic spi_mem wrappers around the driver's hooks. */
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
||||
|
||||
/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
||||
#endif /* __UBOOT__ */ |
@ -0,0 +1,734 @@ |
||||
/* SPDX-License-Identifier: GPL-2.0 */ |
||||
/*
|
||||
* Copyright 2017 - Free Electrons |
||||
* |
||||
* Authors: |
||||
* Boris Brezillon <boris.brezillon@free-electrons.com> |
||||
* Peter Pan <peterpandong@micron.com> |
||||
*/ |
||||
|
||||
#ifndef __LINUX_MTD_NAND_H |
||||
#define __LINUX_MTD_NAND_H |
||||
|
||||
#include <linux/mtd/mtd.h> |
||||
|
||||
/**
 * struct nand_memory_organization - Memory organization structure
 * @bits_per_cell: number of bits per NAND cell
 * @pagesize: page size (in bytes)
 * @oobsize: OOB area size (in bytes)
 * @pages_per_eraseblock: number of pages per eraseblock
 * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
 * @planes_per_lun: number of planes per LUN
 * @luns_per_target: number of LUN per target (target is a synonym for die)
 * @ntargets: total number of targets exposed by the NAND device
 */
struct nand_memory_organization {
	unsigned int bits_per_cell;
	unsigned int pagesize;
	unsigned int oobsize;
	unsigned int pages_per_eraseblock;
	unsigned int eraseblocks_per_lun;
	unsigned int planes_per_lun;
	unsigned int luns_per_target;
	unsigned int ntargets;
};
||||
|
||||
/*
 * Initializer for struct nand_memory_organization; arguments follow the
 * member order (bits-per-cell, page size, OOB size, pages/eraseblock,
 * eraseblocks/LUN, planes/LUN, LUNs/target, number of targets).
 */
#define NAND_MEMORG(bpc, ps, os, ppe, epl, ppl, lpt, nt)	\
	{							\
		.bits_per_cell = (bpc),				\
		.pagesize = (ps),				\
		.oobsize = (os),				\
		.pages_per_eraseblock = (ppe),			\
		.eraseblocks_per_lun = (epl),			\
		.planes_per_lun = (ppl),			\
		.luns_per_target = (lpt),			\
		.ntargets = (nt),				\
	}
||||
|
||||
/**
 * struct nand_row_converter - Information needed to convert an absolute offset
 *			       into a row address
 * @lun_addr_shift: position of the LUN identifier in the row address
 * @eraseblock_addr_shift: position of the eraseblock identifier in the row
 *			   address
 */
struct nand_row_converter {
	unsigned int lun_addr_shift;
	unsigned int eraseblock_addr_shift;
};
||||
|
||||
/**
 * struct nand_pos - NAND position object
 * @target: the NAND target/die
 * @lun: the LUN identifier
 * @plane: the plane within the LUN
 * @eraseblock: the eraseblock within the LUN
 * @page: the page within the LUN
 *
 * This information is usually used by specific sub-layers to select the
 * appropriate target/die and generate a row address to pass to the device.
 */
struct nand_pos {
	unsigned int target;
	unsigned int lun;
	unsigned int plane;
	unsigned int eraseblock;
	unsigned int page;
};
||||
|
||||
/**
 * struct nand_page_io_req - NAND I/O request object
 * @pos: the position this I/O request is targeting
 * @dataoffs: the offset within the page
 * @datalen: number of data bytes to read from/write to this page
 * @databuf: buffer to store data in or get data from
 * @ooboffs: the OOB offset within the page
 * @ooblen: the number of OOB bytes to read from/write to this page
 * @oobbuf: buffer to store OOB data in or get OOB data from
 * @mode: one of the %MTD_OPS_XXX mode
 *
 * This object is used to pass per-page I/O requests to NAND sub-layers. This
 * way all useful information is already formatted in a useful way and
 * specific NAND layers can focus on translating this information into
 * specific commands/operations.
 */
struct nand_page_io_req {
	struct nand_pos pos;
	unsigned int dataoffs;
	unsigned int datalen;
	/* out is used for writes, in for reads */
	union {
		const void *out;
		void *in;
	} databuf;
	unsigned int ooboffs;
	unsigned int ooblen;
	/* out is used for writes, in for reads */
	union {
		const void *out;
		void *in;
	} oobbuf;
	int mode;
};
||||
|
||||
/**
 * struct nand_ecc_req - NAND ECC requirements
 * @strength: ECC strength
 * @step_size: ECC step/block size
 */
struct nand_ecc_req {
	unsigned int strength;
	unsigned int step_size;
};

/* Convenience initializer for struct nand_ecc_req. */
#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
||||
|
||||
/**
 * struct nand_bbt - bad block table object
 * @cache: in memory BBT cache
 */
struct nand_bbt {
	unsigned long *cache;
};
||||
|
||||
struct nand_device; |
||||
|
||||
/**
 * struct nand_ops - NAND operations
 * @erase: erase a specific block. No need to check if the block is bad before
 *	   erasing, this has been taken care of by the generic NAND layer
 * @markbad: mark a specific block bad. No need to check if the block is
 *	     already marked bad, this has been taken care of by the generic
 *	     NAND layer. This method should just write the BBM (Bad Block
 *	     Marker) so that future call to struct_nand_ops->isbad() return
 *	     true
 * @isbad: check whether a block is bad or not. This method should just read
 *	   the BBM and return whether the block is bad or not based on what it
 *	   reads
 *
 * These are all low level operations that should be implemented by specialized
 * NAND layers (SPI NAND, raw NAND, ...).
 */
struct nand_ops {
	int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
	int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
	bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
};
||||
|
||||
/**
 * struct nand_device - NAND device
 * @mtd: MTD instance attached to the NAND device
 * @memorg: memory layout
 * @eccreq: ECC requirements
 * @rowconv: position to row address converter
 * @bbt: bad block table info
 * @ops: NAND operations attached to the NAND device
 *
 * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
 * should declare their own NAND object embedding a nand_device struct (that's
 * how inheritance is done).
 * struct_nand_device->memorg and struct_nand_device->eccreq should be filled
 * at device detection time to reflect the NAND device
 * capabilities/requirements. Once this is done nanddev_init() can be called.
 * It will take care of converting NAND information into MTD ones, which means
 * the specialized NAND layers should never manually tweak
 * struct_nand_device->mtd except for the ->_read/write() hooks.
 */
struct nand_device {
	struct mtd_info *mtd;
	struct nand_memory_organization memorg;
	struct nand_ecc_req eccreq;
	struct nand_row_converter rowconv;
	struct nand_bbt bbt;
	const struct nand_ops *ops;
};
||||
|
||||
/**
 * struct nand_io_iter - NAND I/O iterator
 * @req: current I/O request
 * @oobbytes_per_page: maximum number of OOB bytes per page
 * @dataleft: remaining number of data bytes to read/write
 * @oobleft: remaining number of OOB bytes to read/write
 *
 * Can be used by specialized NAND layers to iterate over all pages covered
 * by an MTD I/O request, which should greatly simplify the boiler-plate
 * code needed to read/write data from/to a NAND device.
 */
struct nand_io_iter {
	struct nand_page_io_req req;
	unsigned int oobbytes_per_page;
	unsigned int dataleft;
	unsigned int oobleft;
};
||||
|
||||
/**
 * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
 * @mtd: MTD instance
 *
 * Return: the NAND device embedding @mtd.
 */
static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
{
	/* The NAND layer stores its nand_device in mtd->priv. */
	return mtd->priv;
}
||||
|
||||
/**
 * nanddev_to_mtd() - Get the MTD device attached to a NAND device
 * @nand: NAND device
 *
 * Return: the MTD device attached to @nand.
 */
static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
{
	return nand->mtd;
}
||||
|
||||
/**
 * nanddev_bits_per_cell() - Get the number of bits per cell
 * @nand: NAND device
 *
 * Return: the number of bits per cell.
 */
static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
{
	return nand->memorg.bits_per_cell;
}
||||
|
||||
/**
 * nanddev_page_size() - Get NAND page size
 * @nand: NAND device
 *
 * Return: the page size (in bytes).
 */
static inline size_t nanddev_page_size(const struct nand_device *nand)
{
	return nand->memorg.pagesize;
}
||||
|
||||
/**
 * nanddev_per_page_oobsize() - Get NAND OOB size
 * @nand: NAND device
 *
 * Return: the OOB size (in bytes).
 */
static inline unsigned int
nanddev_per_page_oobsize(const struct nand_device *nand)
{
	return nand->memorg.oobsize;
}
||||
|
||||
/**
 * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
 * @nand: NAND device
 *
 * Return: the number of pages per eraseblock.
 */
static inline unsigned int
nanddev_pages_per_eraseblock(const struct nand_device *nand)
{
	return nand->memorg.pages_per_eraseblock;
}
||||
|
||||
/**
 * nanddev_eraseblock_size() - Get NAND erase block size
 * @nand: NAND device
 *
 * Return: the eraseblock size (in bytes).
 */
static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
{
	return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
}
||||
|
||||
/**
 * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
 * @nand: NAND device
 *
 * Return: the number of eraseblocks per LUN.
 */
static inline unsigned int
nanddev_eraseblocks_per_lun(const struct nand_device *nand)
{
	return nand->memorg.eraseblocks_per_lun;
}
||||
|
||||
/**
 * nanddev_target_size() - Get the total size provided by a single target/die
 * @nand: NAND device
 *
 * Return: the total size exposed by a single target/die in bytes.
 */
static inline u64 nanddev_target_size(const struct nand_device *nand)
{
	/* Widen to u64 before multiplying to avoid 32-bit overflow. */
	return (u64)nand->memorg.luns_per_target *
	       nand->memorg.eraseblocks_per_lun *
	       nand->memorg.pages_per_eraseblock *
	       nand->memorg.pagesize;
}
||||
|
||||
/**
 * nanddev_ntargets() - Get the total number of targets
 * @nand: NAND device
 *
 * Return: the number of targets/dies exposed by @nand.
 */
static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
{
	return nand->memorg.ntargets;
}
||||
|
||||
/**
|
||||
* nanddev_neraseblocks() - Get the total number of erasablocks |
||||
* @nand: NAND device |
||||
* |
||||
* Return: the total number of eraseblocks exposed by @nand. |
||||
*/ |
||||
static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand) |
||||
{ |
||||
return (u64)nand->memorg.luns_per_target * |
||||
nand->memorg.eraseblocks_per_lun * |
||||
nand->memorg.pages_per_eraseblock; |
||||
} |
||||
|
||||
/**
 * nanddev_size() - Get NAND size
 * @nand: NAND device
 *
 * Return: the total size (in bytes) exposed by @nand.
 */
static inline u64 nanddev_size(const struct nand_device *nand)
{
	return nanddev_target_size(nand) * nanddev_ntargets(nand);
}
||||
|
||||
/**
 * nanddev_get_memorg() - Extract memory organization info from a NAND device
 * @nand: NAND device
 *
 * This can be used by the upper layer to fill the memorg info before calling
 * nanddev_init().
 *
 * Return: the memorg object embedded in the NAND device.
 */
static inline struct nand_memory_organization *
nanddev_get_memorg(struct nand_device *nand)
{
	return &nand->memorg;
}
||||
|
||||
/* Core NAND device init/teardown, implemented in the NAND core. */
int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
		 struct module *owner);
void nanddev_cleanup(struct nand_device *nand);

/**
|
||||
* nanddev_register() - Register a NAND device |
||||
* @nand: NAND device |
||||
* |
||||
* Register a NAND device. |
||||
* This function is just a wrapper around mtd_device_register() |
||||
* registering the MTD device embedded in @nand. |
||||
* |
||||
* Return: 0 in case of success, a negative error code otherwise. |
||||
*/ |
||||
static inline int nanddev_register(struct nand_device *nand) |
||||
{ |
||||
return mtd_device_register(nand->mtd, NULL, 0); |
||||
} |
||||
|
||||
/**
|
||||
* nanddev_unregister() - Unregister a NAND device |
||||
* @nand: NAND device |
||||
* |
||||
* Unregister a NAND device. |
||||
* This function is just a wrapper around mtd_device_unregister() |
||||
* unregistering the MTD device embedded in @nand. |
||||
* |
||||
* Return: 0 in case of success, a negative error code otherwise. |
||||
*/ |
||||
static inline int nanddev_unregister(struct nand_device *nand) |
||||
{ |
||||
return mtd_device_unregister(nand->mtd); |
||||
} |
||||
|
||||
/**
|
||||
* nanddev_set_of_node() - Attach a DT node to a NAND device |
||||
* @nand: NAND device |
||||
* @np: DT node |
||||
* |
||||
* Attach a DT node to a NAND device. |
||||
*/ |
||||
static inline void nanddev_set_of_node(struct nand_device *nand, |
||||
const struct device_node *np) |
||||
{ |
||||
mtd_set_of_node(nand->mtd, np); |
||||
} |
||||
|
||||
/**
|
||||
* nanddev_get_of_node() - Retrieve the DT node attached to a NAND device |
||||
* @nand: NAND device |
||||
* |
||||
* Return: the DT node attached to @nand. |
||||
*/ |
||||
static inline const struct device_node *nanddev_get_of_node(struct nand_device *nand) |
||||
{ |
||||
return mtd_get_of_node(nand->mtd); |
||||
} |
||||
|
||||
/**
|
||||
* nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position |
||||
* @nand: NAND device |
||||
* @offs: absolute NAND offset (usually passed by the MTD layer) |
||||
* @pos: a NAND position object to fill in |
||||
* |
||||
* Converts @offs into a nand_pos representation. |
||||
* |
||||
* Return: the offset within the NAND page pointed by @pos. |
||||
*/ |
||||
static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand, |
||||
loff_t offs, |
||||
struct nand_pos *pos) |
||||
{ |
||||
unsigned int pageoffs; |
||||
u64 tmp = offs; |
||||
|
||||
pageoffs = do_div(tmp, nand->memorg.pagesize); |
||||
pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock); |
||||
pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun); |
||||
pos->plane = pos->eraseblock % nand->memorg.planes_per_lun; |
||||
pos->lun = do_div(tmp, nand->memorg.luns_per_target); |
||||
pos->target = tmp; |
||||
|
||||
return pageoffs; |
||||
} |
||||
|
||||
/**
|
||||
* nanddev_pos_cmp() - Compare two NAND positions |
||||
* @a: First NAND position |
||||
* @b: Second NAND position |
||||
* |
||||
* Compares two NAND positions. |
||||
* |
||||
* Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b. |
||||
*/ |
||||
static inline int nanddev_pos_cmp(const struct nand_pos *a, |
||||
const struct nand_pos *b) |
||||
{ |
||||
if (a->target != b->target) |
||||
return a->target < b->target ? -1 : 1; |
||||
|
||||
if (a->lun != b->lun) |
||||
return a->lun < b->lun ? -1 : 1; |
||||
|
||||
if (a->eraseblock != b->eraseblock) |
||||
return a->eraseblock < b->eraseblock ? -1 : 1; |
||||
|
||||
if (a->page != b->page) |
||||
return a->page < b->page ? -1 : 1; |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
/**
|
||||
* nanddev_pos_to_offs() - Convert a NAND position into an absolute offset |
||||
* @nand: NAND device |
||||
* @pos: the NAND position to convert |
||||
* |
||||
* Converts @pos NAND position into an absolute offset. |
||||
* |
||||
* Return: the absolute offset. Note that @pos points to the beginning of a |
||||
* page, if one wants to point to a specific offset within this page |
||||
* the returned offset has to be adjusted manually. |
||||
*/ |
||||
static inline loff_t nanddev_pos_to_offs(struct nand_device *nand, |
||||
const struct nand_pos *pos) |
||||
{ |
||||
unsigned int npages; |
||||
|
||||
npages = pos->page + |
||||
((pos->eraseblock + |
||||
(pos->lun + |
||||
(pos->target * nand->memorg.luns_per_target)) * |
||||
nand->memorg.eraseblocks_per_lun) * |
||||
nand->memorg.pages_per_eraseblock); |
||||
|
||||
return (loff_t)npages * nand->memorg.pagesize; |
||||
} |
||||
|
||||
/**
|
||||
* nanddev_pos_to_row() - Extract a row address from a NAND position |
||||
* @nand: NAND device |
||||
* @pos: the position to convert |
||||
* |
||||
* Converts a NAND position into a row address that can then be passed to the |
||||
* device. |
||||
* |
||||
* Return: the row address extracted from @pos. |
||||
*/ |
||||
static inline unsigned int nanddev_pos_to_row(struct nand_device *nand, |
||||
const struct nand_pos *pos) |
||||
{ |
||||
return (pos->lun << nand->rowconv.lun_addr_shift) | |
||||
(pos->eraseblock << nand->rowconv.eraseblock_addr_shift) | |
||||
pos->page; |
||||
} |
||||
|
||||
/**
|
||||
* nanddev_pos_next_target() - Move a position to the next target/die |
||||
* @nand: NAND device |
||||
* @pos: the position to update |
||||
* |
||||
* Updates @pos to point to the start of the next target/die. Useful when you |
||||
* want to iterate over all targets/dies of a NAND device. |
||||
*/ |
||||
static inline void nanddev_pos_next_target(struct nand_device *nand, |
||||
struct nand_pos *pos) |
||||
{ |
||||
pos->page = 0; |
||||
pos->plane = 0; |
||||
pos->eraseblock = 0; |
||||
pos->lun = 0; |
||||
pos->target++; |
||||
} |
||||
|
||||
/**
|
||||
* nanddev_pos_next_lun() - Move a position to the next LUN |
||||
* @nand: NAND device |
||||
* @pos: the position to update |
||||
* |
||||
* Updates @pos to point to the start of the next LUN. Useful when you want to |
||||
* iterate over all LUNs of a NAND device. |
||||
*/ |
||||
static inline void nanddev_pos_next_lun(struct nand_device *nand, |
||||
struct nand_pos *pos) |
||||
{ |
||||
if (pos->lun >= nand->memorg.luns_per_target - 1) |
||||
return nanddev_pos_next_target(nand, pos); |
||||
|
||||
pos->lun++; |
||||
pos->page = 0; |
||||
pos->plane = 0; |
||||
pos->eraseblock = 0; |
||||
} |
||||
|
||||
/**
|
||||
* nanddev_pos_next_eraseblock() - Move a position to the next eraseblock |
||||
* @nand: NAND device |
||||
* @pos: the position to update |
||||
* |
||||
* Updates @pos to point to the start of the next eraseblock. Useful when you |
||||
* want to iterate over all eraseblocks of a NAND device. |
||||
*/ |
||||
static inline void nanddev_pos_next_eraseblock(struct nand_device *nand, |
||||
struct nand_pos *pos) |
||||
{ |
||||
if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1) |
||||
return nanddev_pos_next_lun(nand, pos); |
||||
|
||||
pos->eraseblock++; |
||||
pos->page = 0; |
||||
pos->plane = pos->eraseblock % nand->memorg.planes_per_lun; |
||||
} |
||||
|
||||
/**
|
||||
* nanddev_pos_next_eraseblock() - Move a position to the next page |
||||
* @nand: NAND device |
||||
* @pos: the position to update |
||||
* |
||||
* Updates @pos to point to the start of the next page. Useful when you want to |
||||
* iterate over all pages of a NAND device. |
||||
*/ |
||||
static inline void nanddev_pos_next_page(struct nand_device *nand, |
||||
struct nand_pos *pos) |
||||
{ |
||||
if (pos->page >= nand->memorg.pages_per_eraseblock - 1) |
||||
return nanddev_pos_next_eraseblock(nand, pos); |
||||
|
||||
pos->page++; |
||||
} |
||||
|
||||
/**
|
||||
* nand_io_iter_init - Initialize a NAND I/O iterator |
||||
* @nand: NAND device |
||||
* @offs: absolute offset |
||||
* @req: MTD request |
||||
* @iter: NAND I/O iterator |
||||
* |
||||
* Initializes a NAND iterator based on the information passed by the MTD |
||||
* layer. |
||||
*/ |
||||
static inline void nanddev_io_iter_init(struct nand_device *nand, |
||||
loff_t offs, struct mtd_oob_ops *req, |
||||
struct nand_io_iter *iter) |
||||
{ |
||||
struct mtd_info *mtd = nanddev_to_mtd(nand); |
||||
|
||||
iter->req.mode = req->mode; |
||||
iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos); |
||||
iter->req.ooboffs = req->ooboffs; |
||||
iter->oobbytes_per_page = mtd_oobavail(mtd, req); |
||||
iter->dataleft = req->len; |
||||
iter->oobleft = req->ooblen; |
||||
iter->req.databuf.in = req->datbuf; |
||||
iter->req.datalen = min_t(unsigned int, |
||||
nand->memorg.pagesize - iter->req.dataoffs, |
||||
iter->dataleft); |
||||
iter->req.oobbuf.in = req->oobbuf; |
||||
iter->req.ooblen = min_t(unsigned int, |
||||
iter->oobbytes_per_page - iter->req.ooboffs, |
||||
iter->oobleft); |
||||
} |
||||
|
||||
/**
|
||||
* nand_io_iter_next_page - Move to the next page |
||||
* @nand: NAND device |
||||
* @iter: NAND I/O iterator |
||||
* |
||||
* Updates the @iter to point to the next page. |
||||
*/ |
||||
static inline void nanddev_io_iter_next_page(struct nand_device *nand, |
||||
struct nand_io_iter *iter) |
||||
{ |
||||
nanddev_pos_next_page(nand, &iter->req.pos); |
||||
iter->dataleft -= iter->req.datalen; |
||||
iter->req.databuf.in += iter->req.datalen; |
||||
iter->oobleft -= iter->req.ooblen; |
||||
iter->req.oobbuf.in += iter->req.ooblen; |
||||
iter->req.dataoffs = 0; |
||||
iter->req.ooboffs = 0; |
||||
iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize, |
||||
iter->dataleft); |
||||
iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page, |
||||
iter->oobleft); |
||||
} |
||||
|
||||
/**
|
||||
* nand_io_iter_end - Should end iteration or not |
||||
* @nand: NAND device |
||||
* @iter: NAND I/O iterator |
||||
* |
||||
* Check whether @iter has reached the end of the NAND portion it was asked to |
||||
* iterate on or not. |
||||
* |
||||
* Return: true if @iter has reached the end of the iteration request, false |
||||
* otherwise. |
||||
*/ |
||||
static inline bool nanddev_io_iter_end(struct nand_device *nand, |
||||
const struct nand_io_iter *iter) |
||||
{ |
||||
if (iter->dataleft || iter->oobleft) |
||||
return false; |
||||
|
||||
return true; |
||||
} |
||||
|
||||
/**
 * nanddev_io_for_each_page - Iterate over all NAND pages contained in an MTD
 *			      I/O request
 * @nand: NAND device
 * @start: start address to read/write from
 * @req: MTD I/O request
 * @iter: NAND I/O iterator
 *
 * Should be used for iterating over pages that are contained in an MTD
 * request.
 */
#define nanddev_io_for_each_page(nand, start, req, iter)	\
	for (nanddev_io_iter_init(nand, start, req, iter);	\
	     !nanddev_io_iter_end(nand, iter);			\
	     nanddev_io_iter_next_page(nand, iter))

/* Generic NAND helpers implemented in the NAND core. */
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);

/* BBT related functions */

/**
 * enum nand_bbt_block_status - possible states of a block in the in-RAM BBT
 * @NAND_BBT_BLOCK_STATUS_UNKNOWN: block state not yet determined
 * @NAND_BBT_BLOCK_GOOD: block is known good
 * @NAND_BBT_BLOCK_WORN: block went bad during use
 * @NAND_BBT_BLOCK_RESERVED: block is reserved (e.g. for BBT storage)
 * @NAND_BBT_BLOCK_FACTORY_BAD: block was marked bad at the factory
 * @NAND_BBT_BLOCK_NUM_STATUS: number of states (keep last)
 */
enum nand_bbt_block_status {
	NAND_BBT_BLOCK_STATUS_UNKNOWN,
	NAND_BBT_BLOCK_GOOD,
	NAND_BBT_BLOCK_WORN,
	NAND_BBT_BLOCK_RESERVED,
	NAND_BBT_BLOCK_FACTORY_BAD,
	NAND_BBT_BLOCK_NUM_STATUS,
};

/* Bad-block-table management, implemented in the NAND core. */
int nanddev_bbt_init(struct nand_device *nand);
void nanddev_bbt_cleanup(struct nand_device *nand);
int nanddev_bbt_update(struct nand_device *nand);
int nanddev_bbt_get_block_status(const struct nand_device *nand,
				 unsigned int entry);
int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
				 enum nand_bbt_block_status status);
int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);

/**
|
||||
* nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry |
||||
* @nand: NAND device |
||||
* @pos: the NAND position we want to get BBT entry for |
||||
* |
||||
* Return the BBT entry used to store information about the eraseblock pointed |
||||
* by @pos. |
||||
* |
||||
* Return: the BBT entry storing information about eraseblock pointed by @pos. |
||||
*/ |
||||
static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand, |
||||
const struct nand_pos *pos) |
||||
{ |
||||
return pos->eraseblock + |
||||
((pos->lun + (pos->target * nand->memorg.luns_per_target)) * |
||||
nand->memorg.eraseblocks_per_lun); |
||||
} |
||||
|
||||
/**
|
||||
* nanddev_bbt_is_initialized() - Check if the BBT has been initialized |
||||
* @nand: NAND device |
||||
* |
||||
* Return: true if the BBT has been initialized, false otherwise. |
||||
*/ |
||||
static inline bool nanddev_bbt_is_initialized(struct nand_device *nand) |
||||
{ |
||||
return !!nand->bbt.cache; |
||||
} |
||||
|
||||
/* MTD -> NAND helper functions. */
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);

#endif /* __LINUX_MTD_NAND_H */
@ -0,0 +1,432 @@ |
||||
/* SPDX-License-Identifier: GPL-2.0 */ |
||||
/*
|
||||
* Copyright (c) 2016-2017 Micron Technology, Inc. |
||||
* |
||||
* Authors: |
||||
* Peter Pan <peterpandong@micron.com> |
||||
*/ |
||||
#ifndef __LINUX_MTD_SPINAND_H |
||||
#define __LINUX_MTD_SPINAND_H |
||||
|
||||
#ifndef __UBOOT__ |
||||
#include <linux/mutex.h> |
||||
#include <linux/bitops.h> |
||||
#include <linux/device.h> |
||||
#include <linux/mtd/mtd.h> |
||||
#include <linux/mtd/nand.h> |
||||
#include <linux/spi/spi.h> |
||||
#include <linux/spi/spi-mem.h> |
||||
#else |
||||
#include <common.h> |
||||
#include <spi.h> |
||||
#include <spi-mem.h> |
||||
#include <linux/mtd/nand.h> |
||||
#endif |
||||
|
||||
/**
 * Standard SPI NAND flash operations
 */

#define SPINAND_RESET_OP						\
	SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1),				\
		   SPI_MEM_OP_NO_ADDR,					\
		   SPI_MEM_OP_NO_DUMMY,					\
		   SPI_MEM_OP_NO_DATA)

/* 0x06 = write enable, 0x04 = write disable */
#define SPINAND_WR_EN_DIS_OP(enable)					\
	SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1),		\
		   SPI_MEM_OP_NO_ADDR,					\
		   SPI_MEM_OP_NO_DUMMY,					\
		   SPI_MEM_OP_NO_DATA)

#define SPINAND_READID_OP(ndummy, buf, len)				\
	SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),				\
		   SPI_MEM_OP_NO_ADDR,					\
		   SPI_MEM_OP_DUMMY(ndummy, 1),				\
		   SPI_MEM_OP_DATA_IN(len, buf, 1))

#define SPINAND_SET_FEATURE_OP(reg, valptr)				\
	SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1),				\
		   SPI_MEM_OP_ADDR(1, reg, 1),				\
		   SPI_MEM_OP_NO_DUMMY,					\
		   SPI_MEM_OP_DATA_OUT(1, valptr, 1))

#define SPINAND_GET_FEATURE_OP(reg, valptr)				\
	SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1),				\
		   SPI_MEM_OP_ADDR(1, reg, 1),				\
		   SPI_MEM_OP_NO_DUMMY,					\
		   SPI_MEM_OP_DATA_IN(1, valptr, 1))

#define SPINAND_BLK_ERASE_OP(addr)					\
	SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1),				\
		   SPI_MEM_OP_ADDR(3, addr, 1),				\
		   SPI_MEM_OP_NO_DUMMY,					\
		   SPI_MEM_OP_NO_DATA)

#define SPINAND_PAGE_READ_OP(addr)					\
	SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1),				\
		   SPI_MEM_OP_ADDR(3, addr, 1),				\
		   SPI_MEM_OP_NO_DUMMY,					\
		   SPI_MEM_OP_NO_DATA)

/* 0x0b = fast read, 0x03 = normal read */
#define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len)	\
	SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1),		\
		   SPI_MEM_OP_ADDR(2, addr, 1),				\
		   SPI_MEM_OP_DUMMY(ndummy, 1),				\
		   SPI_MEM_OP_DATA_IN(len, buf, 1))

#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len)	\
	SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1),				\
		   SPI_MEM_OP_ADDR(2, addr, 1),				\
		   SPI_MEM_OP_DUMMY(ndummy, 1),				\
		   SPI_MEM_OP_DATA_IN(len, buf, 2))

#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len)	\
	SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),				\
		   SPI_MEM_OP_ADDR(2, addr, 1),				\
		   SPI_MEM_OP_DUMMY(ndummy, 1),				\
		   SPI_MEM_OP_DATA_IN(len, buf, 4))

#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len)	\
	SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1),				\
		   SPI_MEM_OP_ADDR(2, addr, 2),				\
		   SPI_MEM_OP_DUMMY(ndummy, 2),				\
		   SPI_MEM_OP_DATA_IN(len, buf, 2))

#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len)	\
	SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1),				\
		   SPI_MEM_OP_ADDR(2, addr, 4),				\
		   SPI_MEM_OP_DUMMY(ndummy, 4),				\
		   SPI_MEM_OP_DATA_IN(len, buf, 4))

#define SPINAND_PROG_EXEC_OP(addr)					\
	SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1),				\
		   SPI_MEM_OP_ADDR(3, addr, 1),				\
		   SPI_MEM_OP_NO_DUMMY,					\
		   SPI_MEM_OP_NO_DATA)

/* 0x02 = program load (reset buffer), 0x84 = random program load */
#define SPINAND_PROG_LOAD(reset, addr, buf, len)			\
	SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1),		\
		   SPI_MEM_OP_ADDR(2, addr, 1),				\
		   SPI_MEM_OP_NO_DUMMY,					\
		   SPI_MEM_OP_DATA_OUT(len, buf, 1))

/* 0x32 = x4 program load (reset buffer), 0x34 = x4 random program load */
#define SPINAND_PROG_LOAD_X4(reset, addr, buf, len)			\
	SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1),		\
		   SPI_MEM_OP_ADDR(2, addr, 1),				\
		   SPI_MEM_OP_NO_DUMMY,					\
		   SPI_MEM_OP_DATA_OUT(len, buf, 4))

/**
 * Standard SPI NAND flash commands
 */
#define SPINAND_CMD_PROG_LOAD_X4		0x32
#define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4	0x34

/* feature register */
#define REG_BLOCK_LOCK		0xa0
#define BL_ALL_UNLOCKED		0x00

/* configuration register */
#define REG_CFG			0xb0
#define CFG_OTP_ENABLE		BIT(6)
#define CFG_ECC_ENABLE		BIT(4)
#define CFG_QUAD_ENABLE		BIT(0)

/* status register */
#define REG_STATUS		0xc0
#define STATUS_BUSY		BIT(0)
#define STATUS_ERASE_FAILED	BIT(2)
#define STATUS_PROG_FAILED	BIT(3)
#define STATUS_ECC_MASK		GENMASK(5, 4)
#define STATUS_ECC_NO_BITFLIPS	(0 << 4)
#define STATUS_ECC_HAS_BITFLIPS	(1 << 4)
#define STATUS_ECC_UNCOR_ERROR	(2 << 4)

struct spinand_op;
struct spinand_device;

/* Maximum number of READ_ID bytes buffered in struct spinand_id. */
#define SPINAND_MAX_ID_LEN	4

/**
|
||||
* struct spinand_id - SPI NAND id structure |
||||
* @data: buffer containing the id bytes. Currently 4 bytes large, but can |
||||
* be extended if required |
||||
* @len: ID length |
||||
* |
||||
* struct_spinand_id->data contains all bytes returned after a READ_ID command, |
||||
* including dummy bytes if the chip does not emit ID bytes right after the |
||||
* READ_ID command. The responsibility to extract real ID bytes is left to |
||||
* struct_manufacurer_ops->detect(). |
||||
*/ |
||||
struct spinand_id { |
||||
u8 data[SPINAND_MAX_ID_LEN]; |
||||
int len; |
||||
}; |
||||
|
||||
/**
 * struct spinand_manufacturer_ops - SPI NAND manufacturer specific operations
 * @detect: detect a SPI NAND device. Every time a SPI NAND device is probed
 *	    the core calls the struct_manufacturer_ops->detect() hook of each
 *	    registered manufacturer until one of them return 1. Note that
 *	    the first thing to check in this hook is that the manufacturer ID
 *	    in struct_spinand_device->id matches the manufacturer whose
 *	    ->detect() hook has been called. Should return 1 if there's a
 *	    match, 0 if the manufacturer ID does not match and a negative
 *	    error code otherwise. When true is returned, the core assumes
 *	    that properties of the NAND chip (spinand->base.memorg and
 *	    spinand->base.eccreq) have been filled
 * @init: initialize a SPI NAND device
 * @cleanup: cleanup a SPI NAND device
 *
 * Each SPI NAND manufacturer driver should implement this interface so that
 * NAND chips coming from this vendor can be detected and initialized properly.
 */
struct spinand_manufacturer_ops {
	int (*detect)(struct spinand_device *spinand);
	int (*init)(struct spinand_device *spinand);
	void (*cleanup)(struct spinand_device *spinand);
};

/**
|
||||
* struct spinand_manufacturer - SPI NAND manufacturer instance |
||||
* @id: manufacturer ID |
||||
* @name: manufacturer name |
||||
* @ops: manufacturer operations |
||||
*/ |
||||
struct spinand_manufacturer { |
||||
u8 id; |
||||
char *name; |
||||
const struct spinand_manufacturer_ops *ops; |
||||
}; |
||||
|
||||
/* SPI NAND manufacturers, defined in the corresponding vendor drivers. */
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
extern const struct spinand_manufacturer micron_spinand_manufacturer;
extern const struct spinand_manufacturer winbond_spinand_manufacturer;

/**
 * struct spinand_op_variants - SPI NAND operation variants
 * @ops: the list of variants for a given operation
 * @nops: the number of variants
 *
 * Some operations like read-from-cache/write-to-cache have several variants
 * depending on the number of IO lines you use to transfer data or address
 * cycles. This structure is a way to describe the different variants supported
 * by a chip and let the core pick the best one based on the SPI mem controller
 * capabilities.
 */
struct spinand_op_variants {
	const struct spi_mem_op *ops;
	unsigned int nops;
};

/*
 * Build a named spinand_op_variants from a list of spi_mem_op templates;
 * nops is derived from the compound-literal array size.
 */
#define SPINAND_OP_VARIANTS(name, ...)					\
	const struct spinand_op_variants name = {			\
		.ops = (struct spi_mem_op[]) { __VA_ARGS__ },		\
		.nops = sizeof((struct spi_mem_op[]){ __VA_ARGS__ }) /	\
			sizeof(struct spi_mem_op),			\
	}

/**
|
||||
* spinand_ecc_info - description of the on-die ECC implemented by a SPI NAND |
||||
* chip |
||||
* @get_status: get the ECC status. Should return a positive number encoding |
||||
* the number of corrected bitflips if correction was possible or |
||||
* -EBADMSG if there are uncorrectable errors. I can also return |
||||
* other negative error codes if the error is not caused by |
||||
* uncorrectable bitflips |
||||
* @ooblayout: the OOB layout used by the on-die ECC implementation |
||||
*/ |
||||
struct spinand_ecc_info { |
||||
int (*get_status)(struct spinand_device *spinand, u8 status); |
||||
const struct mtd_ooblayout_ops *ooblayout; |
||||
}; |
||||
|
||||
#define SPINAND_HAS_QE_BIT BIT(0) |
||||
|
||||
/**
|
||||
* struct spinand_info - Structure used to describe SPI NAND chips |
||||
* @model: model name |
||||
* @devid: device ID |
||||
* @flags: OR-ing of the SPINAND_XXX flags |
||||
* @memorg: memory organization |
||||
* @eccreq: ECC requirements |
||||
* @eccinfo: on-die ECC info |
||||
* @op_variants: operations variants |
||||
* @op_variants.read_cache: variants of the read-cache operation |
||||
* @op_variants.write_cache: variants of the write-cache operation |
||||
* @op_variants.update_cache: variants of the update-cache operation |
||||
* @select_target: function used to select a target/die. Required only for |
||||
* multi-die chips |
||||
* |
||||
* Each SPI NAND manufacturer driver should have a spinand_info table |
||||
* describing all the chips supported by the driver. |
||||
*/ |
||||
struct spinand_info { |
||||
const char *model; |
||||
u8 devid; |
||||
u32 flags; |
||||
struct nand_memory_organization memorg; |
||||
struct nand_ecc_req eccreq; |
||||
struct spinand_ecc_info eccinfo; |
||||
struct { |
||||
const struct spinand_op_variants *read_cache; |
||||
const struct spinand_op_variants *write_cache; |
||||
const struct spinand_op_variants *update_cache; |
||||
} op_variants; |
||||
int (*select_target)(struct spinand_device *spinand, |
||||
unsigned int target); |
||||
}; |
||||
|
||||
/* Initializer helpers for the spinand_info tables below. */
#define SPINAND_INFO_OP_VARIANTS(__read, __write, __update)		\
	{								\
		.read_cache = __read,					\
		.write_cache = __write,					\
		.update_cache = __update,				\
	}

#define SPINAND_ECCINFO(__ooblayout, __get_status)			\
	.eccinfo = {							\
		.ooblayout = __ooblayout,				\
		.get_status = __get_status,				\
	}

#define SPINAND_SELECT_TARGET(__func)					\
	.select_target = __func,

#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants,	\
		     __flags, ...)					\
	{								\
		.model = __model,					\
		.devid = __id,						\
		.memorg = __memorg,					\
		.eccreq = __eccreq,					\
		.op_variants = __op_variants,				\
		.flags = __flags,					\
		__VA_ARGS__						\
	}

/**
|
||||
* struct spinand_device - SPI NAND device instance |
||||
* @base: NAND device instance |
||||
* @slave: pointer to the SPI slave object |
||||
* @lock: lock used to serialize accesses to the NAND |
||||
* @id: NAND ID as returned by READ_ID |
||||
* @flags: NAND flags |
||||
* @op_templates: various SPI mem op templates |
||||
* @op_templates.read_cache: read cache op template |
||||
* @op_templates.write_cache: write cache op template |
||||
* @op_templates.update_cache: update cache op template |
||||
* @select_target: select a specific target/die. Usually called before sending |
||||
* a command addressing a page or an eraseblock embedded in |
||||
* this die. Only required if your chip exposes several dies |
||||
* @cur_target: currently selected target/die |
||||
* @eccinfo: on-die ECC information |
||||
* @cfg_cache: config register cache. One entry per die |
||||
* @databuf: bounce buffer for data |
||||
* @oobbuf: bounce buffer for OOB data |
||||
* @scratchbuf: buffer used for everything but page accesses. This is needed |
||||
* because the spi-mem interface explicitly requests that buffers |
||||
* passed in spi_mem_op be DMA-able, so we can't based the bufs on |
||||
* the stack |
||||
* @manufacturer: SPI NAND manufacturer information |
||||
* @priv: manufacturer private data |
||||
*/ |
||||
struct spinand_device { |
||||
struct nand_device base; |
||||
#ifndef __UBOOT__ |
||||
struct spi_mem *spimem; |
||||
struct mutex lock; |
||||
#else |
||||
struct spi_slave *slave; |
||||
#endif |
||||
struct spinand_id id; |
||||
u32 flags; |
||||
|
||||
struct { |
||||
const struct spi_mem_op *read_cache; |
||||
const struct spi_mem_op *write_cache; |
||||
const struct spi_mem_op *update_cache; |
||||
} op_templates; |
||||
|
||||
int (*select_target)(struct spinand_device *spinand, |
||||
unsigned int target); |
||||
unsigned int cur_target; |
||||
|
||||
struct spinand_ecc_info eccinfo; |
||||
|
||||
u8 *cfg_cache; |
||||
u8 *databuf; |
||||
u8 *oobbuf; |
||||
u8 *scratchbuf; |
||||
const struct spinand_manufacturer *manufacturer; |
||||
void *priv; |
||||
}; |
||||
|
||||
/**
|
||||
* mtd_to_spinand() - Get the SPI NAND device attached to an MTD instance |
||||
* @mtd: MTD instance |
||||
* |
||||
* Return: the SPI NAND device attached to @mtd. |
||||
*/ |
||||
static inline struct spinand_device *mtd_to_spinand(struct mtd_info *mtd) |
||||
{ |
||||
return container_of(mtd_to_nanddev(mtd), struct spinand_device, base); |
||||
} |
||||
|
||||
/**
|
||||
* spinand_to_mtd() - Get the MTD device embedded in a SPI NAND device |
||||
* @spinand: SPI NAND device |
||||
* |
||||
* Return: the MTD device embedded in @spinand. |
||||
*/ |
||||
static inline struct mtd_info *spinand_to_mtd(struct spinand_device *spinand) |
||||
{ |
||||
return nanddev_to_mtd(&spinand->base); |
||||
} |
||||
|
||||
/**
|
||||
* nand_to_spinand() - Get the SPI NAND device embedding an NAND object |
||||
* @nand: NAND object |
||||
* |
||||
* Return: the SPI NAND device embedding @nand. |
||||
*/ |
||||
static inline struct spinand_device *nand_to_spinand(struct nand_device *nand) |
||||
{ |
||||
return container_of(nand, struct spinand_device, base); |
||||
} |
||||
|
||||
/**
|
||||
* spinand_to_nand() - Get the NAND device embedded in a SPI NAND object |
||||
* @spinand: SPI NAND device |
||||
* |
||||
* Return: the NAND device embedded in @spinand. |
||||
*/ |
||||
static inline struct nand_device * |
||||
spinand_to_nand(struct spinand_device *spinand) |
||||
{ |
||||
return &spinand->base; |
||||
} |
||||
|
||||
/**
|
||||
* spinand_set_of_node - Attach a DT node to a SPI NAND device |
||||
* @spinand: SPI NAND device |
||||
* @np: DT node |
||||
* |
||||
* Attach a DT node to a SPI NAND device. |
||||
*/ |
||||
static inline void spinand_set_of_node(struct spinand_device *spinand, |
||||
const struct device_node *np) |
||||
{ |
||||
nanddev_set_of_node(&spinand->base, np); |
||||
} |
||||
|
||||
/* Core SPI NAND helpers, implemented in the SPI NAND core. */
int spinand_match_and_init(struct spinand_device *dev,
			   const struct spinand_info *table,
			   unsigned int table_size, u8 devid);

int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
int spinand_select_target(struct spinand_device *spinand, unsigned int target);

#endif /* __LINUX_MTD_SPINAND_H */
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue