Merge git://git.denx.de/u-boot-marvell

- Multiple updates to the Turris boards / platform
- Changes / enhancements to the Marvell PHY drivers, mainly
  to support the Turris platform
- Many fixes and enhancements to the pxa3xx NAND driver
- Fixes for the UART boot mode in kwboot
- Misc minor changes to other 32-bit and 64-bit boards
Tom Rini
commit 333279af23
 MAINTAINERS                              |   3
 arch/arm/dts/armada-3720-turris-mox.dts  |  17
 arch/arm/dts/armada-8040-mcbin.dts       |   8
 arch/arm/mach-mvebu/armada8k/cpu.c       |  24
 arch/arm/mach-mvebu/include/mach/soc.h   |   8
 arch/arm/mach-mvebu/spl.c                |  15
 board/CZ.NIC/turris_mox/turris_mox.c     |  88
 cmd/Makefile                             |   3
 configs/turris_mox_defconfig             |   6
 doc/git-mailrc                           |   2
 drivers/clk/mvebu/armada-37xx-periph.c   | 130
 drivers/mtd/nand/pxa3xx_nand.c           | 447
 drivers/phy/marvell/comphy_a3700.h       |   2
 drivers/phy/marvell/comphy_core.c        |  12
 drivers/phy/marvell/comphy_core.h        |  16
 drivers/phy/marvell/comphy_cp110.c       |  21
 drivers/phy/marvell/comphy_hpipe.h       |   6
 drivers/phy/marvell/comphy_mux.c         |   2
 env/sf.c                                 |   3
 include/configs/clearfog.h               |   4
 include/configs/db-88f6820-gp.h          |   6
 include/configs/mvebu_armada-8k.h        |   3
 include/configs/nas220.h                 |   1
 include/mvebu/comphy.h                   |  22
 tools/kwboot.c                           |  14
 25 files changed

@ -139,7 +139,7 @@ S: Maintained
F: arch/arm/cpu/armv8/hisilicon
F: arch/arm/include/asm/arch-hi6220/
ARM MARVELL KIRKWOOD ARMADA-XP ARMADA-38X
ARM MARVELL KIRKWOOD ARMADA-XP ARMADA-38X ARMADA-37XX
M: Prafulla Wadaskar <prafulla@marvell.com>
M: Luka Perkov <luka.perkov@sartura.hr>
M: Stefan Roese <sr@denx.de>
@ -148,6 +148,7 @@ T: git git://git.denx.de/u-boot-marvell.git
F: arch/arm/mach-kirkwood/
F: arch/arm/mach-mvebu/
F: drivers/ata/ahci_mvebu.c
F: drivers/phy/marvell/
ARM MARVELL PXA
M: Marek Vasut <marex@denx.de>

@ -94,17 +94,13 @@
};
};
&pinctrl_sb {
smi_pins: smi-pins {
groups = "smi";
function = "smi";
};
};
&spi0 {
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&spi_cs1_pins>;
assigned-clocks = <&nb_periph_clk 7>;
assigned-clock-parents = <&tbg 1>;
assigned-clock-rates = <20000000>;
spi-flash@0 {
#address-cells = <1>;
@ -130,3 +126,10 @@
vbus-supply = <&reg_usb3_vbus>;
status = "okay";
};
&pcie0 {
pinctrl-names = "default";
pinctrl-0 = <&pcie_pins>;
reset-gpio = <&gpiosb 3 GPIO_ACTIVE_HIGH>;
status = "disabled";
};

@ -154,14 +154,6 @@
status = "okay";
};
/* uSD slot */
&cpm_sdhci0 {
pinctrl-names = "default";
pinctrl-0 = <&cpm_sdhci_pins>;
bus-width = <4>;
status = "okay";
};
&cpm_comphy {
/*
* CP0 Serdes Configuration:

@ -18,6 +18,10 @@
#define RFU_GLOBAL_SW_RST (MVEBU_RFU_BASE + 0x84)
#define RFU_SW_RESET_OFFSET 0
#define SAR0_REG (MVEBU_REGISTER(0x2400200))
#define BOOT_MODE_MASK 0x3f
#define BOOT_MODE_OFFSET 4
/*
* The following table includes all memory regions for Armada 7k and
* 8k SoCs. The Armada 7k is missing the CP110 slave regions here. Lets
@ -125,3 +129,23 @@ u32 mvebu_get_nand_clock(void)
else
return 250 * 1000000;
}
int mmc_get_env_dev(void)
{
u32 reg;
unsigned int boot_mode;
reg = readl(SAR0_REG);
boot_mode = (reg >> BOOT_MODE_OFFSET) & BOOT_MODE_MASK;
switch (boot_mode) {
case 0x28:
case 0x2a:
return 0;
case 0x29:
case 0x2b:
return 1;
}
return CONFIG_SYS_MMC_ENV_DEV;
}
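
For context: the MMC environment code picks its device by calling mmc_get_env_dev(), and the override above replaces a weak default that (as an assumption about env/mmc.c, not part of this diff) looks roughly like this:

__weak int mmc_get_env_dev(void)
{
	/* fall back to the board's compile-time choice */
	return CONFIG_SYS_MMC_ENV_DEV;
}

With the override in place, Armada 8k boards derive device 0 or 1 from the sampled-at-reset boot mode and only use CONFIG_SYS_MMC_ENV_DEV for unrecognized values; the mvebu_armada-8k.h hunk further down supplies that default.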

@ -110,16 +110,12 @@
#define COMPHY_REFCLK_ALIGNMENT (MVEBU_REGISTER(0x182f8))
/* BootROM error register (also includes some status infos) */
#if defined(CONFIG_ARMADA_38X)
#define CONFIG_BOOTROM_ERR_REG (MVEBU_REGISTER(0x182d0))
#define BOOTROM_ERR_MODE_OFFS 0
#define BOOTROM_ERR_MODE_MASK (0xf << BOOTROM_ERR_MODE_OFFS)
#else
#define CONFIG_BOOTROM_ERR_REG (MVEBU_REGISTER(0x182d0))
#define BOOTROM_ERR_MODE_OFFS 28
#define BOOTROM_ERR_MODE_MASK (0xf << BOOTROM_ERR_MODE_OFFS)
#define BOOTROM_ERR_MODE_UART 0x6
#endif
#define BOOTROM_ERR_CODE_OFFS 0
#define BOOTROM_ERR_CODE_MASK (0xf << BOOTROM_ERR_CODE_OFFS)
#if defined(CONFIG_ARMADA_375)
/* SAR values for Armada 375 */

@ -25,17 +25,18 @@ static u32 get_boot_device(void)
val = readl(CONFIG_BOOTROM_ERR_REG);
boot_device = (val & BOOTROM_ERR_MODE_MASK) >> BOOTROM_ERR_MODE_OFFS;
debug("BOOTROM_REG=0x%08x boot_device=0x%x\n", val, boot_device);
#if defined(CONFIG_ARMADA_38X)
if (boot_device == BOOTROM_ERR_MODE_UART)
return BOOT_DEVICE_UART;
#ifdef CONFIG_ARMADA_38X
/*
* If the bootrom error register contains any else than zeros
* in the first 8 bits it's an error condition. And in that case
* try to boot from UART.
* If the bootrom error code contains any other than zeros it's an
* error condition and the bootROM has fallen back to UART boot
*/
boot_device = (val & BOOTROM_ERR_CODE_MASK) >> BOOTROM_ERR_CODE_OFFS;
if (boot_device)
#else
if (boot_device == BOOTROM_ERR_MODE_UART)
#endif
return BOOT_DEVICE_UART;
#endif
/*
* Now check the SAR register for the strapped boot-device

@ -4,18 +4,100 @@
*/
#include <common.h>
#include <asm/io.h>
#include <dm.h>
#include <clk.h>
#include <spi.h>
#include <linux/string.h>
#include <linux/libfdt.h>
#include <fdt_support.h>
#ifdef CONFIG_WDT_ARMADA_3720
#ifdef CONFIG_WDT_ARMADA_37XX
#include <wdt.h>
#endif
#define MAX_MOX_MODULES 10
#define MOX_MODULE_SFP 0x1
#define MOX_MODULE_PCI 0x2
#define MOX_MODULE_TOPAZ 0x3
#define MOX_MODULE_PERIDOT 0x4
#define MOX_MODULE_USB3 0x5
#define MOX_MODULE_PASSPCI 0x6
#define ARMADA_37XX_NB_GPIO_SEL 0xd0013830
#define ARMADA_37XX_SPI_CTRL 0xd0010600
#define ARMADA_37XX_SPI_CFG 0xd0010604
#define ARMADA_37XX_SPI_DOUT 0xd0010608
#define ARMADA_37XX_SPI_DIN 0xd001060c
#define PCIE_PATH "/soc/pcie@d0070000"
DECLARE_GLOBAL_DATA_PTR;
#ifdef CONFIG_WDT_ARMADA_3720
#if defined(CONFIG_OF_BOARD_FIXUP)
int board_fix_fdt(void *blob)
{
u8 topology[MAX_MOX_MODULES];
int i, size, node;
bool enable;
/*
* SPI driver is not loaded in driver model yet, but we have to find out
* if pcie should be enabled in U-Boot's device tree. Therefore we have
* to read SPI by reading/writing SPI registers directly
*/
writel(0x563fa, ARMADA_37XX_NB_GPIO_SEL);
writel(0x10df, ARMADA_37XX_SPI_CFG);
writel(0x2005b, ARMADA_37XX_SPI_CTRL);
while (!(readl(ARMADA_37XX_SPI_CTRL) & 0x2))
udelay(1);
for (i = 0; i < MAX_MOX_MODULES; ++i) {
writel(0x0, ARMADA_37XX_SPI_DOUT);
while (!(readl(ARMADA_37XX_SPI_CTRL) & 0x2))
udelay(1);
topology[i] = readl(ARMADA_37XX_SPI_DIN) & 0xff;
if (topology[i] == 0xff)
break;
topology[i] &= 0xf;
}
size = i;
writel(0x5b, ARMADA_37XX_SPI_CTRL);
if (size > 1 && (topology[1] == MOX_MODULE_PCI ||
topology[1] == MOX_MODULE_USB3 ||
topology[1] == MOX_MODULE_PASSPCI))
enable = true;
else
enable = false;
node = fdt_path_offset(blob, PCIE_PATH);
if (node < 0) {
printf("Cannot find PCIe node in U-Boot's device tree!\n");
return 0;
}
if (fdt_setprop_string(blob, node, "status",
enable ? "okay" : "disabled") < 0) {
printf("Cannot %s PCIe in U-Boot's device tree!\n",
enable ? "enable" : "disable");
return 0;
}
return 0;
}
#endif
#ifdef CONFIG_WDT_ARMADA_37XX
static struct udevice *watchdog_dev;
void watchdog_reset(void)
@ -41,7 +123,7 @@ int board_init(void)
/* address of boot parameters */
gd->bd->bi_boot_params = CONFIG_SYS_SDRAM_BASE + 0x100;
#ifdef CONFIG_WDT_ARMADA_3720
#ifdef CONFIG_WDT_ARMADA_37XX
if (uclass_get_device(UCLASS_WDT, 0, &watchdog_dev)) {
printf("Cannot find Armada 3720 watchdog!\n");
} else {

@ -163,12 +163,13 @@ obj-$(CONFIG_CMD_BLOB) += blob.o
obj-$(CONFIG_CMD_AVB) += avb.o
obj-$(CONFIG_X86) += x86/
obj-$(CONFIG_ARCH_MVEBU) += mvebu/
endif # !CONFIG_SPL_BUILD
# core command
obj-y += nvedit.o
obj-$(CONFIG_ARCH_MVEBU) += mvebu/
obj-$(CONFIG_TI_COMMON_CMD_OPTIONS) += ti/
filechk_data_gz = (echo "static const char data_gz[] ="; cat $< | scripts/bin2c; echo ";")

@ -13,10 +13,12 @@ CONFIG_SYS_CONSOLE_INFO_QUIET=y
# CONFIG_DISPLAY_CPUINFO is not set
# CONFIG_DISPLAY_BOARDINFO is not set
CONFIG_ARCH_EARLY_INIT_R=y
CONFIG_OF_BOARD_FIXUP=y
CONFIG_CMD_CLK=y
# CONFIG_CMD_FLASH is not set
CONFIG_CMD_I2C=y
CONFIG_CMD_MMC=y
CONFIG_CMD_PCI=y
CONFIG_CMD_SF=y
CONFIG_CMD_SPI=y
CONFIG_CMD_USB=y
@ -51,6 +53,10 @@ CONFIG_MVEBU_COMPHY_SUPPORT=y
CONFIG_PINCTRL=y
CONFIG_PINCTRL_ARMADA_37XX=y
CONFIG_DM_REGULATOR_FIXED=y
CONFIG_PCI=y
CONFIG_DM_PCI=y
CONFIG_PCI_AARDVARK=y
# CONFIG_PCI_PNP is not set
# CONFIG_SPL_SERIAL_PRESENT is not set
CONFIG_DEBUG_MVEBU_A3700_UART=y
CONFIG_DEBUG_UART_SHIFT=2

@ -56,7 +56,7 @@ alias arm uboot, aaribaud, trini
alias at91 uboot, abiessmann
alias davinci ti
alias imx uboot, sbabic
alias kirkwood uboot, prafulla, luka
alias kirkwood uboot, prafulla, luka, stroese
alias omap ti
alias pxa uboot, marex
alias rmobile uboot, iwamatsu

@ -224,11 +224,21 @@ static const struct clk_periph clks_sb[] = {
{ },
};
static inline int get_mux(struct a37xx_periphclk *priv, int shift)
static int get_mux(struct a37xx_periphclk *priv, int shift)
{
return (readl(priv->reg + TBG_SEL) >> shift) & 3;
}
static void set_mux(struct a37xx_periphclk *priv, int shift, int val)
{
u32 reg;
reg = readl(priv->reg + TBG_SEL);
reg &= ~(3 << shift);
reg |= (val & 3) << shift;
writel(reg, priv->reg + TBG_SEL);
}
static ulong periph_clk_get_rate(struct a37xx_periphclk *priv, int id);
static ulong get_parent_rate(struct a37xx_periphclk *priv, int id)
@ -277,6 +287,17 @@ static ulong get_div(struct a37xx_periphclk *priv,
return 0;
}
static void set_div_val(struct a37xx_periphclk *priv,
const struct clk_periph *clk, int idx, int val)
{
u32 reg;
reg = readl(priv->reg + clk->div_reg_off[idx]);
reg &= ~(clk->div_mask[idx] << clk->div_shift[idx]);
reg |= (val & clk->div_mask[idx]) << clk->div_shift[idx];
writel(reg, priv->reg + clk->div_reg_off[idx]);
}
static ulong periph_clk_get_rate(struct a37xx_periphclk *priv, int id)
{
const struct clk_periph *clk = &priv->clks[id];
@ -337,6 +358,111 @@ static int armada_37xx_periph_clk_disable(struct clk *clk)
return periph_clk_enable(clk, 0);
}
#define diff(a, b) abs((long)(a) - (long)(b))
static ulong find_best_div(const struct clk_div_table *t0,
const struct clk_div_table *t1, ulong parent_rate,
ulong req_rate, int *v0, int *v1)
{
const struct clk_div_table *i, *j;
ulong rate, best_rate = 0;
for (i = t0; i && i->div; ++i) {
for (j = t1; j && j->div; ++j) {
rate = DIV_ROUND_UP(parent_rate, i->div * j->div);
if (!best_rate ||
diff(rate, req_rate) < diff(best_rate, req_rate)) {
best_rate = rate;
*v0 = i->val;
*v1 = j->val;
}
}
}
return best_rate;
}
static ulong armada_37xx_periph_clk_set_rate(struct clk *clk, ulong req_rate)
{
struct a37xx_periphclk *priv = dev_get_priv(clk->dev);
const struct clk_periph *periph_clk = &priv->clks[clk->id];
ulong rate, old_rate, parent_rate;
int div_val0 = 0, div_val1 = 0;
const struct clk_div_table *t1;
static const struct clk_div_table empty_table[2] = {
{ 1, 0 },
{ 0, 0 }
};
if (clk->id > priv->count)
return -EINVAL;
old_rate = periph_clk_get_rate(priv, clk->id);
if (old_rate == -EINVAL)
return -EINVAL;
if (old_rate == req_rate)
return old_rate;
if (!periph_clk->can_gate || !periph_clk->dividers)
return -ENOTSUPP;
parent_rate = get_parent_rate(priv, clk->id);
if (parent_rate == -EINVAL)
return -EINVAL;
t1 = empty_table;
if (periph_clk->dividers > 1)
t1 = periph_clk->div_table[1];
rate = find_best_div(periph_clk->div_table[0], t1, parent_rate,
req_rate, &div_val0, &div_val1);
periph_clk_enable(clk, 0);
set_div_val(priv, periph_clk, 0, div_val0);
if (periph_clk->dividers > 1)
set_div_val(priv, periph_clk, 1, div_val1);
periph_clk_enable(clk, 1);
return rate;
}
static int armada_37xx_periph_clk_set_parent(struct clk *clk,
struct clk *parent)
{
struct a37xx_periphclk *priv = dev_get_priv(clk->dev);
const struct clk_periph *periph_clk = &priv->clks[clk->id];
struct clk check_parent;
int ret;
/* We also check if parent is our TBG clock */
if (clk->id > priv->count || parent->id >= MAX_TBG_PARENTS)
return -EINVAL;
if (!periph_clk->can_mux || !periph_clk->can_gate)
return -ENOTSUPP;
ret = clk_get_by_index(clk->dev, 0, &check_parent);
if (ret < 0)
return ret;
if (parent->dev != check_parent.dev)
ret = -EINVAL;
clk_free(&check_parent);
if (ret < 0)
return ret;
periph_clk_enable(clk, 0);
set_mux(priv, periph_clk->mux_shift, parent->id);
periph_clk_enable(clk, 1);
return 0;
}
#if defined(CONFIG_CMD_CLK) && defined(CONFIG_CLK_ARMADA_3720)
static int armada_37xx_periph_clk_dump(struct udevice *dev)
{
@ -473,6 +599,8 @@ static int armada_37xx_periph_clk_probe(struct udevice *dev)
static const struct clk_ops armada_37xx_periph_clk_ops = {
.get_rate = armada_37xx_periph_clk_get_rate,
.set_rate = armada_37xx_periph_clk_set_rate,
.set_parent = armada_37xx_periph_clk_set_parent,
.enable = armada_37xx_periph_clk_enable,
.disable = armada_37xx_periph_clk_disable,
};
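
These two new ops are what make the assigned-clocks properties added to armada-3720-turris-mox.dts above take effect: the clock framework reparents the SPI peripheral clock to TBG output 1 and sets it to 20 MHz. A minimal sketch of the equivalent explicit calls from a consumer follows; spi_dev stands for the SPI controller's udevice, the clock indices are assumptions based on a typical Armada 37xx device tree, and error handling is omitted:

struct clk periph, parent;

/* the SPI node's first clock is its peripheral clock, <&nb_periph_clk 7> above */
clk_get_by_index(spi_dev, 0, &periph);
/* the peripheral clock controller references the TBG outputs; index 1 = TBG 1 */
clk_get_by_index(periph.dev, 1, &parent);

clk_set_parent(&periph, &parent);	/* armada_37xx_periph_clk_set_parent() */
clk_set_rate(&periph, 20000000);	/* armada_37xx_periph_clk_set_rate() */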

@ -24,14 +24,16 @@ DECLARE_GLOBAL_DATA_PTR;
#define TIMEOUT_DRAIN_FIFO 5 /* in ms */
#define CHIP_DELAY_TIMEOUT 200
#define NAND_STOP_DELAY 40
#define PAGE_CHUNK_SIZE (2048)
/*
* Define a buffer size for the initial command that detects the flash device:
* STATUS, READID and PARAM. The largest of these is the PARAM command,
* needing 256 bytes.
* STATUS, READID and PARAM.
* ONFI param page is 256 bytes, and there are three redundant copies
* to be read. JEDEC param page is 512 bytes, and there are also three
* redundant copies to be read.
* Hence this buffer should be at least 512 x 3. Let's pick 2048.
*/
#define INIT_BUFFER_SIZE 256
#define INIT_BUFFER_SIZE 2048
/* registers and bit definitions */
#define NDCR (0x00) /* Control register */
@ -58,7 +60,7 @@ DECLARE_GLOBAL_DATA_PTR;
#define NDCR_ND_MODE (0x3 << 21)
#define NDCR_NAND_MODE (0x0)
#define NDCR_CLR_PG_CNT (0x1 << 20)
#define NDCR_STOP_ON_UNCOR (0x1 << 19)
#define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
@ -109,6 +111,13 @@ DECLARE_GLOBAL_DATA_PTR;
#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
/*
* This should be large enough to read 'ONFI' and 'JEDEC'.
* Let's use 7 bytes, which is the maximum ID count supported
* by the controller (see NDCR_RD_ID_CNT_MASK).
*/
#define READ_ID_BYTES 7
/* macros for registers read/write */
#define nand_writel(info, off, val) \
writel((val), (info)->mmio_base + (off))
@ -146,7 +155,6 @@ enum pxa3xx_nand_variant {
struct pxa3xx_nand_host {
struct nand_chip chip;
struct mtd_info *mtd;
void *info_data;
/* page size of attached chip */
@ -156,8 +164,6 @@ struct pxa3xx_nand_host {
/* calculated from pxa3xx_nand_flash data */
unsigned int col_addr_cycles;
unsigned int row_addr_cycles;
size_t read_id_bytes;
};
struct pxa3xx_nand_info {
@ -193,15 +199,44 @@ struct pxa3xx_nand_info {
int use_spare; /* use spare ? */
int need_wait;
unsigned int data_size; /* data to be read from FIFO */
unsigned int chunk_size; /* split commands chunk size */
unsigned int oob_size;
/* Amount of real data per full chunk */
unsigned int chunk_size;
/* Amount of spare data per full chunk */
unsigned int spare_size;
/* Number of full chunks (i.e chunk_size + spare_size) */
unsigned int nfullchunks;
/*
* Total number of chunks. If equal to nfullchunks, then there
* are only full chunks. Otherwise, there is one last chunk of
* size (last_chunk_size + last_spare_size)
*/
unsigned int ntotalchunks;
/* Amount of real data in the last chunk */
unsigned int last_chunk_size;
/* Amount of spare data in the last chunk */
unsigned int last_spare_size;
unsigned int ecc_size;
unsigned int ecc_err_cnt;
unsigned int max_bitflips;
int retcode;
/*
* Variables only valid during command
* execution. step_chunk_size and step_spare_size is the
* amount of real data and spare data in the current
* chunk. cur_chunk is the current chunk being
* read/programmed.
*/
unsigned int step_chunk_size;
unsigned int step_spare_size;
unsigned int cur_chunk;
/* cached register value */
uint32_t reg_ndcr;
uint32_t ndtr0cs0;
@ -215,13 +250,33 @@ struct pxa3xx_nand_info {
};
static struct pxa3xx_nand_timing timing[] = {
/*
* tCH Enable signal hold time
* tCS Enable signal setup time
* tWH ND_nWE high duration
* tWP ND_nWE pulse time
* tRH ND_nRE high duration
* tRP ND_nRE pulse width
* tR ND_nWE high to ND_nRE low for read
* tWHR ND_nWE high to ND_nRE low for status read
* tAR ND_ALE low to ND_nRE low delay
*/
/*ch cs wh wp rh rp r whr ar */
{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
{ 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
{ 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
{ 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
{ 5, 20, 10, 12, 10, 12, 25000, 60, 10, },
};
static struct pxa3xx_nand_flash builtin_flash_types[] = {
/*
* chip_id
* flash_width Width of Flash memory (DWIDTH_M)
* dfc_width Width of flash controller(DWIDTH_C)
* *timing
* http://www.linux-mtd.infradead.org/nand-data/nanddata.html
*/
{ 0x46ec, 16, 16, &timing[1] },
{ 0xdaec, 8, 8, &timing[1] },
{ 0xd7ec, 8, 8, &timing[1] },
@ -230,6 +285,7 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {
{ 0xdc2c, 8, 8, &timing[2] },
{ 0xcc2c, 16, 16, &timing[2] },
{ 0xba20, 16, 16, &timing[3] },
{ 0xda98, 8, 8, &timing[4] },
};
#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
@ -267,6 +323,20 @@ static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
.oobfree = { {2, 30} }
};
static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
.eccbytes = 64,
.eccpos = {
64, 65, 66, 67, 68, 69, 70, 71,
72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87,
88, 89, 90, 91, 92, 93, 94, 95,
96, 97, 98, 99, 100, 101, 102, 103,
104, 105, 106, 107, 108, 109, 110, 111,
112, 113, 114, 115, 116, 117, 118, 119,
120, 121, 122, 123, 124, 125, 126, 127},
.oobfree = { {1, 4}, {6, 26} }
};
static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
.eccbytes = 64,
.eccpos = {
@ -282,6 +352,33 @@ static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
.oobfree = { {6, 26}, { 64, 32} }
};
static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
.eccbytes = 128,
.eccpos = {
32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55,
56, 57, 58, 59, 60, 61, 62, 63,
96, 97, 98, 99, 100, 101, 102, 103,
104, 105, 106, 107, 108, 109, 110, 111,
112, 113, 114, 115, 116, 117, 118, 119,
120, 121, 122, 123, 124, 125, 126, 127,
160, 161, 162, 163, 164, 165, 166, 167,
168, 169, 170, 171, 172, 173, 174, 175,
176, 177, 178, 179, 180, 181, 182, 183,
184, 185, 186, 187, 188, 189, 190, 191,
224, 225, 226, 227, 228, 229, 230, 231,
232, 233, 234, 235, 236, 237, 238, 239,
240, 241, 242, 243, 244, 245, 246, 247,
248, 249, 250, 251, 252, 253, 254, 255},
/* Bootrom looks in bytes 0 & 5 for bad blocks */
.oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
};
static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
.eccbytes = 128,
.eccpos = {
@ -292,6 +389,13 @@ static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
.oobfree = { }
};
static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
.eccbytes = 256,
.eccpos = {},
/* HW ECC handles all ECC data and all spare area is free for OOB */
.oobfree = {{0, 160} }
};
#define NDTR0_tCH(c) (min((c), 7) << 19)
#define NDTR0_tCS(c) (min((c), 7) << 16)
#define NDTR0_tWH(c) (min((c), 7) << 11)
@ -347,9 +451,9 @@ static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
u32 tWP_min = DIV_ROUND_UP(t->tWC_min - tWH_min, 1000);
u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
u32 tRP_min = DIV_ROUND_UP(t->tRC_min - tREH_min, 1000);
u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
u32 tR = chip->chip_delay * 1000;
u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
@ -381,16 +485,17 @@ static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
struct nand_chip *chip = &host->chip;
struct pxa3xx_nand_info *info = host->info_data;
const struct pxa3xx_nand_flash *f = NULL;
struct mtd_info *mtd = nand_to_mtd(&host->chip);
int mode, id, ntypes, i;
mode = onfi_get_async_timing_mode(chip);
if (mode == ONFI_TIMING_MODE_UNKNOWN) {
ntypes = ARRAY_SIZE(builtin_flash_types);
chip->cmdfunc(host->mtd, NAND_CMD_READID, 0x00, -1);
chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
id = chip->read_byte(host->mtd);
id |= chip->read_byte(host->mtd) << 0x8;
id = chip->read_byte(mtd);
id |= chip->read_byte(mtd) << 0x8;
for (i = 0; i < ntypes; i++) {
f = &builtin_flash_types[i];
@ -427,25 +532,6 @@ static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
return 0;
}
/*
* Set the data and OOB size, depending on the selected
* spare and ECC configuration.
* Only applicable to READ0, READOOB and PAGEPROG commands.
*/
static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
struct mtd_info *mtd)
{
int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
info->data_size = mtd->writesize;
if (!oob_enable)
return;
info->oob_size = info->spare_size;
if (!info->use_ecc)
info->oob_size += info->ecc_size;
}
/**
* NOTE: it is a must to set ND_RUN first, then write
* command buffer, otherwise, it does not work.
@ -478,8 +564,8 @@ static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
ndcr |= NDCR_ND_RUN;
/* clear status bits and run */
nand_writel(info, NDCR, 0);
nand_writel(info, NDSR, NDSR_MASK);
nand_writel(info, NDCR, 0);
nand_writel(info, NDCR, ndcr);
}
@ -526,39 +612,38 @@ static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
unsigned int do_bytes = min(info->data_size, info->chunk_size);
switch (info->state) {
case STATE_PIO_WRITING:
writesl(info->mmio_base + NDDB,
info->data_buff + info->data_buff_pos,
DIV_ROUND_UP(do_bytes, 4));
if (info->step_chunk_size)
writesl(info->mmio_base + NDDB,
info->data_buff + info->data_buff_pos,
DIV_ROUND_UP(info->step_chunk_size, 4));
if (info->oob_size > 0)
if (info->step_spare_size)
writesl(info->mmio_base + NDDB,
info->oob_buff + info->oob_buff_pos,
DIV_ROUND_UP(info->oob_size, 4));
DIV_ROUND_UP(info->step_spare_size, 4));
break;
case STATE_PIO_READING:
drain_fifo(info,
info->data_buff + info->data_buff_pos,
DIV_ROUND_UP(do_bytes, 4));
if (info->step_chunk_size)
drain_fifo(info,
info->data_buff + info->data_buff_pos,
DIV_ROUND_UP(info->step_chunk_size, 4));
if (info->oob_size > 0)
if (info->step_spare_size)
drain_fifo(info,
info->oob_buff + info->oob_buff_pos,
DIV_ROUND_UP(info->oob_size, 4));
DIV_ROUND_UP(info->step_spare_size, 4));
break;
default:
dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
info->state);
info->state);
BUG();
}
/* Update buffer pointers for multi-page read/write */
info->data_buff_pos += do_bytes;
info->oob_buff_pos += info->oob_size;
info->data_size -= do_bytes;
info->data_buff_pos += info->step_chunk_size;
info->oob_buff_pos += info->step_spare_size;
}
static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
@ -583,6 +668,9 @@ static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
cmd_done = NDSR_CS1_CMDD;
}
/* TODO - find out why we need the delay during write operation. */
ndelay(1);
status = nand_readl(info, NDSR);
if (status & NDSR_UNCORERR)
@ -620,8 +708,14 @@ static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
is_ready = 1;
}
/*
* Clear all status bit before issuing the next command, which
* can and will alter the status bits and will deserve a new
* interrupt on its own. This lets the controller exit the IRQ
*/
nand_writel(info, NDSR, status);
if (status & NDSR_WRCMDREQ) {
nand_writel(info, NDSR, NDSR_WRCMDREQ);
status &= ~NDSR_WRCMDREQ;
info->state = STATE_CMD_HANDLE;
@ -642,8 +736,6 @@ static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
nand_writel(info, NDCB0, info->ndcb3);
}
/* clear NDSR to let the controller exit the IRQ */
nand_writel(info, NDSR, status);
if (is_completed)
info->cmd_complete = 1;
if (is_ready)
@ -664,7 +756,7 @@ static void set_command_address(struct pxa3xx_nand_info *info,
unsigned int page_size, uint16_t column, int page_addr)
{
/* small page addr setting */
if (page_size < PAGE_CHUNK_SIZE) {
if (page_size < info->chunk_size) {
info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
| (column & 0xFF);
@ -683,14 +775,16 @@ static void set_command_address(struct pxa3xx_nand_info *info,
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
struct pxa3xx_nand_host *host = info->host[info->cs];
struct mtd_info *mtd = host->mtd;
struct mtd_info *mtd = nand_to_mtd(&host->chip);
/* reset data and oob column point to handle data */
info->buf_start = 0;
info->buf_count = 0;
info->oob_size = 0;
info->data_buff_pos = 0;
info->oob_buff_pos = 0;
info->step_chunk_size = 0;
info->step_spare_size = 0;
info->cur_chunk = 0;
info->use_ecc = 0;
info->use_spare = 1;
info->retcode = ERR_NONE;
@ -700,10 +794,9 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
switch (command) {
case NAND_CMD_READ0:
case NAND_CMD_READOOB:
case NAND_CMD_PAGEPROG:
info->use_ecc = 1;
case NAND_CMD_READOOB:
pxa3xx_set_datasize(info, mtd);
break;
case NAND_CMD_PARAM:
info->use_spare = 0;
@ -734,7 +827,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
struct mtd_info *mtd;
host = info->host[info->cs];
mtd = host->mtd;
mtd = nand_to_mtd(&host->chip);
addr_cycle = 0;
exec_cmd = 1;
@ -760,19 +853,27 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
if (command == NAND_CMD_READOOB)
info->buf_start += mtd->writesize;
if (info->cur_chunk < info->nfullchunks) {
info->step_chunk_size = info->chunk_size;
info->step_spare_size = info->spare_size;
} else {
info->step_chunk_size = info->last_chunk_size;
info->step_spare_size = info->last_spare_size;
}
/*
* Multiple page read needs an 'extended command type' field,
* which is either naked-read or last-read according to the
* state.
*/
if (mtd->writesize == PAGE_CHUNK_SIZE) {
if (mtd->writesize == info->chunk_size) {
info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
} else if (mtd->writesize > info->chunk_size) {
info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
| NDCB0_LEN_OVRD
| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
info->ndcb3 = info->chunk_size +
info->oob_size;
info->ndcb3 = info->step_chunk_size +
info->step_spare_size;
}
set_command_address(info, mtd->writesize, column, page_addr);
@ -787,13 +888,11 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
* Multiple page programming needs to execute the initial
* SEQIN command that sets the page address.
*/
if (mtd->writesize > PAGE_CHUNK_SIZE) {
if (mtd->writesize > info->chunk_size) {
info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
| addr_cycle
| command;
/* No data transfer in this case */
info->data_size = 0;
exec_cmd = 1;
}
break;
@ -805,8 +904,16 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
break;
}
if (info->cur_chunk < info->nfullchunks) {
info->step_chunk_size = info->chunk_size;
info->step_spare_size = info->spare_size;
} else {
info->step_chunk_size = info->last_chunk_size;
info->step_spare_size = info->last_spare_size;
}
/* Second command setting for large pages */
if (mtd->writesize > PAGE_CHUNK_SIZE) {
if (mtd->writesize > info->chunk_size) {
/*
* Multiple page write uses the 'extended command'
* field. This can be used to issue a command dispatch
@ -815,14 +922,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
| NDCB0_LEN_OVRD
| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
info->ndcb3 = info->chunk_size +
info->oob_size;
info->ndcb3 = info->step_chunk_size +
info->step_spare_size;
/*
* This is the command dispatch that completes a chunked
* page program operation.
*/
if (info->data_size == 0) {
if (info->cur_chunk == info->ntotalchunks) {
info->ndcb0 = NDCB0_CMD_TYPE(0x1)
| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
| command;
@ -842,24 +949,24 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
break;
case NAND_CMD_PARAM:
info->buf_count = 256;
info->buf_count = INIT_BUFFER_SIZE;
info->ndcb0 |= NDCB0_CMD_TYPE(0)
| NDCB0_ADDR_CYC(1)
| NDCB0_LEN_OVRD
| command;
info->ndcb1 = (column & 0xFF);
info->ndcb3 = 256;
info->data_size = 256;
info->ndcb3 = INIT_BUFFER_SIZE;
info->step_chunk_size = INIT_BUFFER_SIZE;
break;
case NAND_CMD_READID:
info->buf_count = host->read_id_bytes;
info->buf_count = READ_ID_BYTES;
info->ndcb0 |= NDCB0_CMD_TYPE(3)
| NDCB0_ADDR_CYC(1)
| command;
info->ndcb1 = (column & 0xFF);
info->data_size = 8;
info->step_chunk_size = 8;
break;
case NAND_CMD_STATUS:
info->buf_count = 1;
@ -867,7 +974,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
| NDCB0_ADDR_CYC(1)
| command;
info->data_size = 8;
info->step_chunk_size = 8;
break;
case NAND_CMD_ERASE1:
@ -1051,22 +1158,31 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
}
}
/* Only a few commands need several steps */
if (command != NAND_CMD_PAGEPROG &&
command != NAND_CMD_READ0 &&
command != NAND_CMD_READOOB)
break;
info->cur_chunk++;
/* Check if the sequence is complete */
if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
if (info->cur_chunk == info->ntotalchunks &&
command != NAND_CMD_PAGEPROG)
break;
/*
* After a splitted program command sequence has issued
* the command dispatch, the command sequence is complete.
*/
if (info->data_size == 0 &&
if (info->cur_chunk == (info->ntotalchunks + 1) &&
command == NAND_CMD_PAGEPROG &&
ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
break;
if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
/* Last read: issue a 'last naked read' */
if (info->data_size == info->chunk_size)
if (info->cur_chunk == info->ntotalchunks - 1)
ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
else
ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
@ -1076,7 +1192,7 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
* the command dispatch must be issued to complete.
*/
} else if (command == NAND_CMD_PAGEPROG &&
info->data_size == 0) {
info->cur_chunk == info->ntotalchunks) {
ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
}
} while (1);
@ -1218,42 +1334,42 @@ static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
return NAND_STATUS_READY;
}
static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info)
static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
{
struct pxa3xx_nand_platform_data *pdata = info->pdata;
/* Configure default flash values */
info->reg_ndcr = 0x0; /* enable all interrupts */
info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
info->reg_ndcr |= NDCR_SPARE_EN;
return 0;
}
static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
{
struct pxa3xx_nand_host *host = info->host[info->cs];
struct mtd_info *mtd = host->mtd;
struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
struct nand_chip *chip = mtd_to_nand(mtd);
info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
return 0;
}
static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
/*
* We set 0 by hard coding here, for we don't support keep_config
* when there is more than one chip attached to the controller
*/
struct pxa3xx_nand_host *host = info->host[0];
struct pxa3xx_nand_platform_data *pdata = info->pdata;
uint32_t ndcr = nand_readl(info, NDCR);
if (ndcr & NDCR_PAGE_SZ) {
/* Controller's FIFO size */
info->chunk_size = 2048;
host->read_id_bytes = 4;
} else {
info->chunk_size = 512;
host->read_id_bytes = 2;
}
/* Set an initial chunk size */
info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
info->reg_ndcr = ndcr &
~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
return 0;
}
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
@ -1273,13 +1389,13 @@ static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
const struct nand_sdr_timings *timings;
int ret;
mtd = info->host[info->cs]->mtd;
mtd = nand_to_mtd(&info->host[info->cs]->chip);
chip = mtd_to_nand(mtd);
/* configure default flash values */
info->reg_ndcr = 0x0; /* enable all interrupts */
info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
info->reg_ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
/* use the common timing to make a try */
@ -1302,6 +1418,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
int strength, int ecc_stepsize, int page_size)
{
if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
info->nfullchunks = 1;
info->ntotalchunks = 1;
info->chunk_size = 2048;
info->spare_size = 40;
info->ecc_size = 24;
@ -1310,6 +1428,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
ecc->strength = 1;
} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
info->nfullchunks = 1;
info->ntotalchunks = 1;
info->chunk_size = 512;
info->spare_size = 8;
info->ecc_size = 8;
@ -1323,6 +1443,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
*/
} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
info->ecc_bch = 1;
info->nfullchunks = 1;
info->ntotalchunks = 1;
info->chunk_size = 2048;
info->spare_size = 32;
info->ecc_size = 32;
@ -1333,6 +1455,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
info->ecc_bch = 1;
info->nfullchunks = 2;
info->ntotalchunks = 2;
info->chunk_size = 2048;
info->spare_size = 32;
info->ecc_size = 32;
@ -1341,19 +1465,64 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
ecc->layout = &ecc_layout_4KB_bch4bit;
ecc->strength = 16;
} else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
info->ecc_bch = 1;
info->nfullchunks = 4;
info->ntotalchunks = 4;
info->chunk_size = 2048;
info->spare_size = 32;
info->ecc_size = 32;
ecc->mode = NAND_ECC_HW;
ecc->size = info->chunk_size;
ecc->layout = &ecc_layout_8KB_bch4bit;
ecc->strength = 16;
/*
* Required ECC: 8-bit correction per 512 bytes
* Select: 16-bit correction per 1024 bytes
*/
} else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
info->ecc_bch = 1;
info->nfullchunks = 1;
info->ntotalchunks = 2;
info->chunk_size = 1024;
info->spare_size = 0;
info->last_chunk_size = 1024;
info->last_spare_size = 64;
info->ecc_size = 32;
ecc->mode = NAND_ECC_HW;
ecc->size = info->chunk_size;
ecc->layout = &ecc_layout_2KB_bch8bit;
ecc->strength = 16;
} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
info->ecc_bch = 1;
info->nfullchunks = 4;
info->ntotalchunks = 5;
info->chunk_size = 1024;
info->spare_size = 0;
info->last_chunk_size = 0;
info->last_spare_size = 64;
info->ecc_size = 32;
ecc->mode = NAND_ECC_HW;
ecc->size = info->chunk_size;
ecc->layout = &ecc_layout_4KB_bch8bit;
ecc->strength = 16;
} else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
info->ecc_bch = 1;
info->nfullchunks = 8;
info->ntotalchunks = 9;
info->chunk_size = 1024;
info->spare_size = 0;
info->last_chunk_size = 0;
info->last_spare_size = 160;
info->ecc_size = 32;
ecc->mode = NAND_ECC_HW;
ecc->size = info->chunk_size;
ecc->layout = &ecc_layout_8KB_bch8bit;
ecc->strength = 16;
} else {
dev_err(&info->pdev->dev,
"ECC strength %d at page size %d is not supported\n",
@ -1373,21 +1542,21 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
int ret;
uint16_t ecc_strength, ecc_step;
if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
goto KEEP_CONFIG;
/* Set a default chunk size */
info->chunk_size = 512;
ret = pxa3xx_nand_sensing(host);
if (ret) {
dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
info->cs);
return ret;
if (pdata->keep_config) {
pxa3xx_nand_detect_config(info);
} else {
ret = pxa3xx_nand_config_ident(info);
if (ret)
return ret;
ret = pxa3xx_nand_sensing(host);
if (ret) {
dev_info(&info->pdev->dev,
"There is no chip on cs %d!\n",
info->cs);
return ret;
}
}
KEEP_CONFIG:
/* Device detection must be done with ECC disabled */
if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
nand_writel(info, NDECCCTRL, 0x0);
@ -1404,10 +1573,6 @@ KEEP_CONFIG:
}
}
ret = pxa3xx_nand_config_flash(info);
if (ret)
return ret;
#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
/*
* We'll use a bad block table stored in-flash and don't
@ -1418,21 +1583,6 @@ KEEP_CONFIG:
chip->bbt_md = &bbt_mirror_descr;
#endif
/*
* If the page size is bigger than the FIFO size, let's check
* we are given the right variant and then switch to the extended
* (aka splitted) command handling,
*/
if (mtd->writesize > PAGE_CHUNK_SIZE) {
if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
chip->cmdfunc = nand_cmdfunc_extended;
} else {
dev_err(&info->pdev->dev,
"unsupported page size on this variant\n");
return -ENODEV;
}
}
if (pdata->ecc_strength && pdata->ecc_step_size) {
ecc_strength = pdata->ecc_strength;
ecc_step = pdata->ecc_step_size;
@ -1452,6 +1602,21 @@ KEEP_CONFIG:
if (ret)
return ret;
/*
* If the page size is bigger than the FIFO size, let's check
* we are given the right variant and then switch to the extended
* (aka split) command handling,
*/
if (mtd->writesize > info->chunk_size) {
if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
chip->cmdfunc = nand_cmdfunc_extended;
} else {
dev_err(&info->pdev->dev,
"unsupported page size on this variant\n");
return -ENODEV;
}
}
/* calculate addressing information */
if (mtd->writesize >= 2048)
host->col_addr_cycles = 2;
@ -1472,6 +1637,10 @@ KEEP_CONFIG:
host->row_addr_cycles = 3;
else
host->row_addr_cycles = 2;
if (!pdata->keep_config)
pxa3xx_nand_config_tail(info);
return nand_scan_tail(mtd);
}
@ -1494,10 +1663,8 @@ static int alloc_nand_resource(struct pxa3xx_nand_info *info)
mtd = nand_to_mtd(chip);
host = (struct pxa3xx_nand_host *)chip;
info->host[cs] = host;
host->mtd = mtd;
host->cs = cs;
host->info_data = info;
host->read_id_bytes = 4;
mtd->owner = THIS_MODULE;
nand_set_controller_data(chip, host);
@ -1612,7 +1779,7 @@ static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
probe_success = 0;
for (cs = 0; cs < pdata->num_cs; cs++) {
struct mtd_info *mtd = info->host[cs]->mtd;
struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
/*
* The mtd name matches the one used in 'mtdparts' kernel

@ -6,7 +6,7 @@
#ifndef _COMPHY_A3700_H_
#define _COMPHY_A3700_H_
#include "comphy.h"
#include "comphy_core.h"
#include "comphy_hpipe.h"
#define MVEBU_REG(offs) \

@ -11,7 +11,7 @@
#include <linux/errno.h>
#include <asm/io.h>
#include "comphy.h"
#include "comphy_core.h"
#define COMPHY_MAX_CHIP 4
@ -66,6 +66,11 @@ void comphy_print(struct chip_serdes_phy_config *chip_cfg,
}
}
__weak int comphy_update_map(struct comphy_map *serdes_map, int count)
{
return 0;
}
static int comphy_probe(struct udevice *dev)
{
const void *blob = gd->fdt_blob;
@ -76,6 +81,7 @@ static int comphy_probe(struct udevice *dev)
int lane;
int last_idx = 0;
static int current_idx;
int res;
/* Save base addresses for later use */
chip_cfg->comphy_base_addr = (void *)devfdt_get_addr_index(dev, 0);
@ -143,6 +149,10 @@ static int comphy_probe(struct udevice *dev)
lane++;
}
res = comphy_update_map(comphy_map_data, chip_cfg->comphy_lanes_count);
if (res < 0)
return res;
/* Save CP index for MultiCP devices (A8K) */
chip_cfg->cp_index = current_idx++;
/* PHY power UP sequence */

@ -3,11 +3,11 @@
* Copyright (C) 2015-2016 Marvell International Ltd.
*/
#ifndef _COMPHY_H_
#define _COMPHY_H_
#ifndef _COMPHY_CORE_H_
#define _COMPHY_CORE_H_
#include <dt-bindings/comphy/comphy_data.h>
#include <fdtdec.h>
#include <mvebu/comphy.h>
#if defined(DEBUG)
#define debug_enter() printf("----> Enter %s\n", __func__);
@ -80,14 +80,6 @@ struct comphy_mux_data {
struct comphy_mux_options mux_values[MAX_LANE_OPTIONS];
};
struct comphy_map {
u32 type;
u32 speed;
u32 invert;
bool clk_src;
bool end_point;
};
struct chip_serdes_phy_config {
struct comphy_mux_data *mux_data;
int (*ptr_comphy_chip_init)(struct chip_serdes_phy_config *,
@ -183,5 +175,5 @@ void comphy_pcie_config_detect(u32 comphy_max_count,
struct comphy_map *serdes_map);
void comphy_pcie_unit_general_config(u32 pex_index);
#endif /* _COMPHY_H_ */
#endif /* _COMPHY_CORE_H_ */

@ -9,7 +9,7 @@
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "comphy.h"
#include "comphy_core.h"
#include "comphy_hpipe.h"
#include "sata.h"
#include "utmi_phy.h"
@ -641,7 +641,8 @@ static int comphy_usb3_power_up(u32 lane, void __iomem *hpipe_base,
}
static int comphy_sata_power_up(u32 lane, void __iomem *hpipe_base,
void __iomem *comphy_base, int cp_index)
void __iomem *comphy_base, int cp_index,
u32 invert)
{
u32 mask, data, i, ret = 1;
void __iomem *hpipe_addr = HPIPE_ADDR(hpipe_base, lane);
@ -927,6 +928,19 @@ static int comphy_sata_power_up(u32 lane, void __iomem *hpipe_base,
reg_set(hpipe_addr + HPIPE_PWR_CTR_REG,
0x0 << HPIPE_PWR_CTR_RST_DFE_OFFSET,
HPIPE_PWR_CTR_RST_DFE_MASK);
/* Set RX / TX swaps */
data = mask = 0;
if (invert & PHY_POLARITY_TXD_INVERT) {
data |= (1 << HPIPE_SYNC_PATTERN_TXD_SWAP_OFFSET);
mask |= HPIPE_SYNC_PATTERN_TXD_SWAP_MASK;
}
if (invert & PHY_POLARITY_RXD_INVERT) {
data |= (1 << HPIPE_SYNC_PATTERN_RXD_SWAP_OFFSET);
mask |= HPIPE_SYNC_PATTERN_RXD_SWAP_MASK;
}
reg_set(hpipe_addr + HPIPE_SYNC_PATTERN_REG, data, mask);
/* SW reset for interupt logic */
reg_set(hpipe_addr + HPIPE_PWR_CTR_REG,
0x1 << HPIPE_PWR_CTR_SFT_RST_OFFSET,
@ -2006,7 +2020,8 @@ int comphy_cp110_init(struct chip_serdes_phy_config *ptr_chip_cfg,
case PHY_TYPE_SATA3:
ret = comphy_sata_power_up(
lane, hpipe_base_addr, comphy_base_addr,
ptr_chip_cfg->cp_index);
ptr_chip_cfg->cp_index,
serdes_map[lane].invert);
break;
case PHY_TYPE_USB3_HOST0:
case PHY_TYPE_USB3_HOST1:

@ -221,6 +221,12 @@
(0x7 << HPIPE_LOOPBACK_SEL_OFFSET)
#define HPIPE_SYNC_PATTERN_REG 0x090
#define HPIPE_SYNC_PATTERN_TXD_SWAP_OFFSET 10
#define HPIPE_SYNC_PATTERN_TXD_SWAP_MASK \
(0x1 << HPIPE_SYNC_PATTERN_TXD_SWAP_OFFSET)
#define HPIPE_SYNC_PATTERN_RXD_SWAP_OFFSET 11
#define HPIPE_SYNC_PATTERN_RXD_SWAP_MASK \
(0x1 << HPIPE_SYNC_PATTERN_RXD_SWAP_OFFSET)
#define HPIPE_INTERFACE_REG 0x94
#define HPIPE_INTERFACE_GEN_MAX_OFFSET 10

@ -6,7 +6,7 @@
#include <common.h>
#include <asm/io.h>
#include "comphy.h"
#include "comphy_core.h"
#include "comphy_hpipe.h"
/*

env/sf.c

@ -58,7 +58,8 @@ static int setup_flash_device(void)
/* speed and mode will be read from DT */
ret = spi_flash_probe_bus_cs(CONFIG_ENV_SPI_BUS, CONFIG_ENV_SPI_CS,
0, 0, &new);
CONFIG_ENV_SPI_MAX_HZ, CONFIG_ENV_SPI_MODE,
&new);
if (ret) {
set_default_env("spi_flash_probe_bus_cs() failed", 0);
return ret;

@ -29,6 +29,10 @@
*/
#define CONFIG_SYS_MMC_BASE MVEBU_SDIO_BASE
#ifdef CONFIG_CMD_MMC
#define CONFIG_SUPPORT_EMMC_BOOT
#endif
/* USB/EHCI configuration */
#define CONFIG_EHCI_IS_TDI

@ -28,6 +28,12 @@
#define CONFIG_SYS_I2C_SLAVE 0x0
#define CONFIG_SYS_I2C_SPEED 100000
/*
* SPI Flash configuration for the environemnt access
*/
#define CONFIG_ENV_SPI_BUS 0
#define CONFIG_ENV_SPI_CS 0
/* SPI NOR flash default params, used by sf commands */
#define CONFIG_SF_DEFAULT_SPEED 1000000
#define CONFIG_SF_DEFAULT_MODE SPI_MODE_3

@ -64,6 +64,9 @@
#define CONFIG_ENV_SIZE (64 << 10) /* 64KiB */
#define CONFIG_ENV_SECT_SIZE (64 << 10) /* 64KiB sectors */
/* When runtime detection fails this is the default */
#define CONFIG_SYS_MMC_ENV_DEV 0
#define CONFIG_SYS_MAX_NAND_DEVICE 1
#define CONFIG_SYS_NAND_MAX_CHIPS 1
#define CONFIG_SYS_NAND_ONFI_DETECTION

@ -69,7 +69,6 @@
"0x500000@0xc0000(uimage),"\
"0x1a40000@0x5c0000(rootfs)\0" \
"mtdids=nand0=orion_nand\0"\
"bootdelay=-1\0"\
"autostart=no\0"\
"autoload=no\0"

@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2015-2016 Marvell International Ltd.
*/
#ifndef _MVEBU_COMPHY_H_
#define _MVEBU_COMPHY_H_
#include <dt-bindings/comphy/comphy_data.h>
struct comphy_map {
u32 type;
u32 speed;
u32 invert;
bool clk_src;
bool end_point;
};
int comphy_update_map(struct comphy_map *serdes_map, int count);
#endif /* _MVEBU_COMPHY_H_ */
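
Together with the weak comphy_update_map() added in comphy_core.c above, this new public header lets board code adjust the serdes map before the PHY power-up sequence runs. A hypothetical board-side override (the lane index and the polarity choice are invented for illustration) might look like:

#include <mvebu/comphy.h>

int comphy_update_map(struct comphy_map *serdes_map, int count)
{
	/* e.g. the board routes the SATA TX pair on lane 2 with swapped polarity */
	if (count > 2 && serdes_map[2].type == PHY_TYPE_SATA3)
		serdes_map[2].invert = PHY_POLARITY_TXD_INVERT;

	return 0;
}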

@ -286,6 +286,7 @@ kwboot_bootmsg(int tty, void *msg)
{
int rc;
char c;
int count;
if (msg == NULL)
kwboot_printv("Please reboot the target into UART boot mode...");
@ -297,10 +298,12 @@ kwboot_bootmsg(int tty, void *msg)
if (rc)
break;
rc = kwboot_tty_send(tty, msg, 8);
if (rc) {
usleep(msg_req_delay * 1000);
continue;
for (count = 0; count < 128; count++) {
rc = kwboot_tty_send(tty, msg, 8);
if (rc) {
usleep(msg_req_delay * 1000);
continue;
}
}
rc = kwboot_tty_recv(tty, &c, 1, msg_rsp_timeo);
@ -426,6 +429,9 @@ kwboot_xmodem(int tty, const void *_data, size_t size)
kwboot_printv("Sending boot image...\n");
sleep(2); /* flush isn't effective without it */
tcflush(tty, TCIOFLUSH);
do {
struct kwboot_block block;
int n;
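
For reference, the bootmsg/xmodem path being fixed here is what runs when kwboot is used to recover a board over its serial console, typically along the lines of (device path, baud rate and image name are only examples):

kwboot -t -B 115200 -b u-boot-spl.kwb /dev/ttyUSB0

-b repeatedly sends the UART boot-mode magic pattern until the BootROM answers, then transfers the image over xmodem, and -t drops into a terminal on the same port afterwards.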
