CI: https://source.denx.de/u-boot/custodians/u-boot-nand-flash/-/pipelines/27258

This series addresses issues found by Andrew Goodbody and mostly drops
drivers that are not used by any board.
This commit is contained in:
Tom Rini 2025-08-04 08:23:43 -06:00
commit 851c3f28d0
15 changed files with 33 additions and 4235 deletions

View File

@ -17,25 +17,6 @@
#define CFG_SYS_BAUDRATE_TABLE \ #define CFG_SYS_BAUDRATE_TABLE \
{ 9600, 19200, 38400, 57600, 115200, 230400, 460800 } { 9600, 19200, 38400, 57600, 115200, 230400, 460800 }
/* NAND */
#if defined(CONFIG_NAND_LPC32XX_SLC)
#define NAND_LARGE_BLOCK_PAGE_SIZE 0x800
#define NAND_SMALL_BLOCK_PAGE_SIZE 0x200
#if (CONFIG_SYS_NAND_PAGE_SIZE == NAND_LARGE_BLOCK_PAGE_SIZE)
#define CFG_SYS_NAND_ECCPOS { 40, 41, 42, 43, 44, 45, 46, 47, \
48, 49, 50, 51, 52, 53, 54, 55, \
56, 57, 58, 59, 60, 61, 62, 63, }
#elif (CONFIG_SYS_NAND_PAGE_SIZE == NAND_SMALL_BLOCK_PAGE_SIZE)
#define CFG_SYS_NAND_ECCPOS { 10, 11, 12, 13, 14, 15, }
#else
#error "CONFIG_SYS_NAND_PAGE_SIZE set to an invalid value"
#endif
#define CFG_SYS_NAND_ECCSIZE 0x100
#define CFG_SYS_NAND_ECCBYTES 3
#endif /* CONFIG_NAND_LPC32XX_SLC */
/* NOR Flash */ /* NOR Flash */
/* USB OHCI */ /* USB OHCI */

View File

@ -261,11 +261,6 @@ config NAND_LPC32XX_MLC
help help
Enable the LPC32XX MLC NAND controller. Enable the LPC32XX MLC NAND controller.
config NAND_LPC32XX_SLC
bool "Support LPC32XX_SLC controller"
help
Enable the LPC32XX SLC NAND controller.
config NAND_OMAP_GPMC config NAND_OMAP_GPMC
bool "Support OMAP GPMC NAND controller" bool "Support OMAP GPMC NAND controller"
depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3
@ -556,12 +551,6 @@ config NAND_MXS_USE_MINIMUM_ECC
endif endif
config NAND_MXIC
bool "Macronix raw NAND controller"
select SYS_NAND_SELF_INIT
help
This selects the Macronix raw NAND controller driver.
config NAND_ZYNQ config NAND_ZYNQ
bool "Support for Zynq Nand controller" bool "Support for Zynq Nand controller"
select SPL_SYS_NAND_SELF_INIT select SPL_SYS_NAND_SELF_INIT
@ -579,22 +568,6 @@ config NAND_ZYNQ_USE_BOOTLOADER1_TIMINGS
This flag prevent U-Boot reconfigure NAND flash controller and reuse This flag prevent U-Boot reconfigure NAND flash controller and reuse
the NAND timing from 1st stage bootloader. the NAND timing from 1st stage bootloader.
config NAND_OCTEONTX
bool "Support for OcteonTX NAND controller"
select SYS_NAND_SELF_INIT
imply CMD_NAND
help
This enables Nand flash controller hardware found on the OcteonTX
processors.
config NAND_OCTEONTX_HW_ECC
bool "Support Hardware ECC for OcteonTX NAND controller"
depends on NAND_OCTEONTX
default y
help
This enables Hardware BCH engine found on the OcteonTX processors to
support ECC for NAND flash controller.
config NAND_STM32_FMC2 config NAND_STM32_FMC2
bool "Support for NAND controller on STM32MP SoCs" bool "Support for NAND controller on STM32MP SoCs"
depends on ARCH_STM32MP depends on ARCH_STM32MP
@ -684,7 +657,7 @@ config SYS_NAND_ONFI_DETECTION
config SYS_NAND_PAGE_SIZE config SYS_NAND_PAGE_SIZE
hex "NAND chip page size" hex "NAND chip page size"
depends on ARCH_SUNXI || NAND_OMAP_GPMC || NAND_LPC32XX_SLC || \ depends on ARCH_SUNXI || NAND_OMAP_GPMC || \
SPL_NAND_SIMPLE || (NAND_MXC && SPL_NAND_SUPPORT) || \ SPL_NAND_SIMPLE || (NAND_MXC && SPL_NAND_SUPPORT) || \
MVEBU_SPL_BOOT_DEVICE_NAND || \ MVEBU_SPL_BOOT_DEVICE_NAND || \
(NAND_ATMEL && SPL_NAND_SUPPORT) || \ (NAND_ATMEL && SPL_NAND_SUPPORT) || \
@ -696,7 +669,7 @@ config SYS_NAND_PAGE_SIZE
config SYS_NAND_OOBSIZE config SYS_NAND_OOBSIZE
hex "NAND chip OOB size" hex "NAND chip OOB size"
depends on ARCH_SUNXI || NAND_OMAP_GPMC || NAND_LPC32XX_SLC || \ depends on ARCH_SUNXI || NAND_OMAP_GPMC || \
SPL_NAND_SIMPLE || (NAND_MXC && SPL_NAND_SUPPORT) || \ SPL_NAND_SIMPLE || (NAND_MXC && SPL_NAND_SUPPORT) || \
(NAND_ATMEL && SPL_NAND_SUPPORT) || SPL_GENERATE_ATMEL_PMECC_HEADER (NAND_ATMEL && SPL_NAND_SUPPORT) || SPL_GENERATE_ATMEL_PMECC_HEADER
depends on !NAND_MXS && !NAND_DENALI_DT && !NAND_LPC32XX_MLC depends on !NAND_MXS && !NAND_DENALI_DT && !NAND_LPC32XX_MLC

View File

@ -61,21 +61,17 @@ obj-$(CONFIG_NAND_FSL_IFC) += fsl_ifc_nand.o
obj-$(CONFIG_NAND_KIRKWOOD) += kirkwood_nand.o obj-$(CONFIG_NAND_KIRKWOOD) += kirkwood_nand.o
obj-$(CONFIG_NAND_KMETER1) += kmeter1_nand.o obj-$(CONFIG_NAND_KMETER1) += kmeter1_nand.o
obj-$(CONFIG_NAND_LPC32XX_MLC) += lpc32xx_nand_mlc.o obj-$(CONFIG_NAND_LPC32XX_MLC) += lpc32xx_nand_mlc.o
obj-$(CONFIG_NAND_LPC32XX_SLC) += lpc32xx_nand_slc.o
obj-$(CONFIG_NAND_VF610_NFC) += vf610_nfc.o obj-$(CONFIG_NAND_VF610_NFC) += vf610_nfc.o
obj-$(CONFIG_NAND_MESON) += meson_nand.o obj-$(CONFIG_NAND_MESON) += meson_nand.o
obj-$(CONFIG_NAND_MXC) += mxc_nand.o obj-$(CONFIG_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_NAND_MXS) += mxs_nand.o obj-$(CONFIG_NAND_MXS) += mxs_nand.o
obj-$(CONFIG_NAND_MXS_DT) += mxs_nand_dt.o obj-$(CONFIG_NAND_MXS_DT) += mxs_nand_dt.o
obj-$(CONFIG_NAND_OCTEONTX) += octeontx_nand.o
obj-$(CONFIG_NAND_OCTEONTX_HW_ECC) += octeontx_bch.o
obj-$(CONFIG_NAND_PXA3XX) += pxa3xx_nand.o obj-$(CONFIG_NAND_PXA3XX) += pxa3xx_nand.o
obj-$(CONFIG_TEGRA_NAND) += tegra_nand.o obj-$(CONFIG_TEGRA_NAND) += tegra_nand.o
obj-$(CONFIG_NAND_OMAP_GPMC) += omap_gpmc.o obj-$(CONFIG_NAND_OMAP_GPMC) += omap_gpmc.o
obj-$(CONFIG_NAND_OMAP_ELM) += omap_elm.o obj-$(CONFIG_NAND_OMAP_ELM) += omap_elm.o
obj-$(CONFIG_NAND_SANDBOX) += sand_nand.o obj-$(CONFIG_NAND_SANDBOX) += sand_nand.o
obj-$(CONFIG_NAND_SUNXI) += sunxi_nand.o obj-$(CONFIG_NAND_SUNXI) += sunxi_nand.o
obj-$(CONFIG_NAND_MXIC) += mxic_nand.o
obj-$(CONFIG_NAND_ZYNQ) += zynq_nand.o obj-$(CONFIG_NAND_ZYNQ) += zynq_nand.o
obj-$(CONFIG_NAND_STM32_FMC2) += stm32_fmc2_nand.o obj-$(CONFIG_NAND_STM32_FMC2) += stm32_fmc2_nand.o
obj-$(CONFIG_CORTINA_NAND) += cortina_nand.o obj-$(CONFIG_CORTINA_NAND) += cortina_nand.o

View File

@ -186,14 +186,13 @@ int init_nand_dma(struct nand_chip *nand)
info->tx_desc = malloc_cache_aligned((sizeof(struct tx_descriptor_t) * info->tx_desc = malloc_cache_aligned((sizeof(struct tx_descriptor_t) *
CA_DMA_DESC_NUM)); CA_DMA_DESC_NUM));
if (!info->tx_desc) {
printf("Fail to alloc DMA descript!\n");
return -ENOMEM;
}
info->rx_desc = malloc_cache_aligned((sizeof(struct rx_descriptor_t) * info->rx_desc = malloc_cache_aligned((sizeof(struct rx_descriptor_t) *
CA_DMA_DESC_NUM)); CA_DMA_DESC_NUM));
if (!info->rx_desc) {
if (!info->rx_desc && info->tx_desc) {
printf("Fail to alloc DMA descript!\n");
kfree(info->tx_desc);
return -ENOMEM;
} else if (info->rx_desc && !info->tx_desc) {
printf("Fail to alloc DMA descript!\n"); printf("Fail to alloc DMA descript!\n");
kfree(info->tx_desc); kfree(info->tx_desc);
return -ENOMEM; return -ENOMEM;

View File

@ -173,13 +173,9 @@ static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
time_left--; time_left--;
} }
if (!time_left) { dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
dev_err(denali->dev, "timeout while waiting for irq 0x%x\n", irq_mask);
irq_mask); return 0;
return 0;
}
return denali->irq_status;
} }
static uint32_t denali_check_irq(struct denali_nand_info *denali) static uint32_t denali_check_irq(struct denali_nand_info *denali)

View File

@ -1,587 +0,0 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* LPC32xx SLC NAND flash controller driver
*
* (C) Copyright 2015-2018 Vladimir Zapolskiy <vz@mleia.com>
* Copyright (c) 2015 Tyco Fire Protection Products.
*
* Hardware ECC support original source code
* Copyright (C) 2008 by NXP Semiconductors
* Author: Kevin Wells
*/
#include <config.h>
#include <log.h>
#include <nand.h>
#include <linux/bug.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/rawnand.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/config.h>
#include <asm/arch/clk.h>
#include <asm/arch/sys_proto.h>
#include <asm/arch/dma.h>
#include <asm/arch/cpu.h>
#include <linux/printk.h>
/*
 * SLC NAND controller register file, memory-mapped at SLC_NAND_BASE.
 * Offsets are implied by declaration order (one 32-bit word each).
 */
struct lpc32xx_nand_slc_regs {
	u32 data;	/* data port (8-bit payload per 32-bit access) */
	u32 addr;	/* address latch: write address cycles here */
	u32 cmd;	/* command latch: write command bytes here */
	u32 stop;
	u32 ctrl;	/* CTRL_* bits: reset, ECC clear, DMA start */
	u32 cfg;	/* CFG_* bits: CE, DMA/ECC enables, direction */
	u32 stat;	/* STAT_* bits: NAND ready, DMA FIFO state */
	u32 int_stat;
	u32 ien;	/* interrupt enable mask */
	u32 isr;
	u32 icr;	/* interrupt clear (write INT_STAT_* to clear) */
	u32 tac;	/* NAND bus timing (TAC_* fields) */
	u32 tc;		/* transfer count for DMA transactions */
	u32 ecc;	/* hardware ECC accumulator readout */
	u32 dma_data;	/* data port used as DMA source/destination */
};
/* CFG register */
#define CFG_CE_LOW (1 << 5)
#define CFG_DMA_ECC (1 << 4) /* Enable DMA ECC bit */
#define CFG_ECC_EN (1 << 3) /* ECC enable bit */
#define CFG_DMA_BURST (1 << 2) /* DMA burst bit */
#define CFG_DMA_DIR (1 << 1) /* DMA write(0)/read(1) bit */
/* CTRL register */
#define CTRL_SW_RESET (1 << 2)
#define CTRL_ECC_CLEAR (1 << 1) /* Reset ECC bit */
#define CTRL_DMA_START (1 << 0) /* Start DMA channel bit */
/* STAT register */
#define STAT_DMA_FIFO (1 << 2) /* DMA FIFO has data bit */
#define STAT_NAND_READY (1 << 0)
/* INT_STAT register */
#define INT_STAT_TC (1 << 1)
#define INT_STAT_RDY (1 << 0)
/* TAC register bits, be aware of overflows */
#define TAC_W_RDY(n) (max_t(uint32_t, (n), 0xF) << 28)
#define TAC_W_WIDTH(n) (max_t(uint32_t, (n), 0xF) << 24)
#define TAC_W_HOLD(n) (max_t(uint32_t, (n), 0xF) << 20)
#define TAC_W_SETUP(n) (max_t(uint32_t, (n), 0xF) << 16)
#define TAC_R_RDY(n) (max_t(uint32_t, (n), 0xF) << 12)
#define TAC_R_WIDTH(n) (max_t(uint32_t, (n), 0xF) << 8)
#define TAC_R_HOLD(n) (max_t(uint32_t, (n), 0xF) << 4)
#define TAC_R_SETUP(n) (max_t(uint32_t, (n), 0xF) << 0)
/*
 * NAND ECC layout for small page (512B/16B OOB) NAND devices.
 * Note: for large page devices, the default layouts are used.
 */
static struct nand_ecclayout lpc32xx_nand_oob_16 = {
	.eccbytes = 6,
	/* ECC stored in the last six OOB bytes */
	.eccpos = { 10, 11, 12, 13, 14, 15, },
	.oobfree = {
		{ .offset = 0, .length = 4, },
		/* bytes 4-5 skipped — presumably bad-block marker area; confirm */
		{ .offset = 6, .length = 4, },
	}
};
#if defined(CONFIG_DMA_LPC32XX) && !defined(CONFIG_XPL_BUILD)
#define ECCSTEPS (CONFIG_SYS_NAND_PAGE_SIZE / CFG_SYS_NAND_ECCSIZE)
/*
* DMA Descriptors
* For Large Block: 17 descriptors = ((16 Data and ECC Read) + 1 Spare Area)
* For Small Block: 5 descriptors = ((4 Data and ECC Read) + 1 Spare Area)
*/
static struct lpc32xx_dmac_ll dmalist[ECCSTEPS * 2 + 1];
static u32 ecc_buffer[8]; /* MAX ECC size */
static unsigned int dmachan = (unsigned int)-1; /* Invalid channel */
/*
* Helper macro for the DMA client (i.e. NAND SLC):
* - to write the next DMA linked list item address
* (see arch/include/asm/arch-lpc32xx/dma.h).
* - to assign the DMA data register to DMA source or destination address.
* - to assign the ECC register to DMA source or destination address.
*/
#define lpc32xx_dmac_next_lli(x) ((u32)x)
#define lpc32xx_dmac_set_dma_data() ((u32)&lpc32xx_nand_slc_regs->dma_data)
#define lpc32xx_dmac_set_ecc() ((u32)&lpc32xx_nand_slc_regs->ecc)
#endif
static struct lpc32xx_nand_slc_regs __iomem *lpc32xx_nand_slc_regs
= (struct lpc32xx_nand_slc_regs __iomem *)SLC_NAND_BASE;
/*
 * Reset the SLC controller and program the NAND bus timings.
 * Timing fields are derived from the current HCLK rate and the
 * board-provided CFG_LPC32XX_NAND_SLC_* constants.
 */
static void lpc32xx_nand_init(void)
{
	uint32_t hclk = get_hclk_clk_rate();
	/* Reset SLC NAND controller */
	writel(CTRL_SW_RESET, &lpc32xx_nand_slc_regs->ctrl);
	/* 8-bit bus, no DMA, no ECC, ordinary CE signal */
	writel(0, &lpc32xx_nand_slc_regs->cfg);
	/* Interrupts disabled and cleared */
	writel(0, &lpc32xx_nand_slc_regs->ien);
	writel(INT_STAT_TC | INT_STAT_RDY,
	       &lpc32xx_nand_slc_regs->icr);
	/* Configure NAND flash timings (one 4-bit field per timing arc) */
	writel(TAC_W_RDY(CFG_LPC32XX_NAND_SLC_WDR_CLKS) |
	       TAC_W_WIDTH(hclk / CFG_LPC32XX_NAND_SLC_WWIDTH) |
	       TAC_W_HOLD(hclk / CFG_LPC32XX_NAND_SLC_WHOLD) |
	       TAC_W_SETUP(hclk / CFG_LPC32XX_NAND_SLC_WSETUP) |
	       TAC_R_RDY(CFG_LPC32XX_NAND_SLC_RDR_CLKS) |
	       TAC_R_WIDTH(hclk / CFG_LPC32XX_NAND_SLC_RWIDTH) |
	       TAC_R_HOLD(hclk / CFG_LPC32XX_NAND_SLC_RHOLD) |
	       TAC_R_SETUP(hclk / CFG_LPC32XX_NAND_SLC_RSETUP),
	       &lpc32xx_nand_slc_regs->tac);
}
/*
 * nand_chip cmd_ctrl hook: drive the chip-enable line from NAND_NCE and
 * latch command/address bytes into the matching SLC register.
 */
static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd,
				  int cmd, unsigned int ctrl)
{
	debug("ctrl: 0x%08x, cmd: 0x%08x\n", ctrl, cmd);

	/* nCE tracks the chip-enable flag on every call */
	if (ctrl & NAND_NCE)
		setbits_le32(&lpc32xx_nand_slc_regs->cfg, CFG_CE_LOW);
	else
		clrbits_le32(&lpc32xx_nand_slc_regs->cfg, CFG_CE_LOW);

	/* Nothing to latch when there is no byte or no latch line raised */
	if (cmd == NAND_CMD_NONE || !(ctrl & (NAND_CLE | NAND_ALE)))
		return;

	writel(cmd & 0xFF, (ctrl & NAND_CLE) ?
	       &lpc32xx_nand_slc_regs->cmd : &lpc32xx_nand_slc_regs->addr);
}
static int lpc32xx_nand_dev_ready(struct mtd_info *mtd)
{
return readl(&lpc32xx_nand_slc_regs->stat) & STAT_NAND_READY;
}
#if defined(CONFIG_DMA_LPC32XX) && !defined(CONFIG_XPL_BUILD)
/*
 * Prepares DMA descriptors for NAND RD/WR operations.
 * If the size is < 256 bytes then it is assumed to be an OOB transfer.
 *
 * Builds the scatter/gather linked list in dmalist[]: one data descriptor
 * plus one ECC-snapshot descriptor per 256-byte ECC step, or a single
 * descriptor for an OOB-only transfer.  Only the final descriptor in the
 * chain raises the terminal-count interrupt (DMAC_CHAN_INT_TC_EN).
 */
static void lpc32xx_nand_dma_configure(struct nand_chip *chip,
				       const u8 *buffer, int size,
				       int read)
{
	u32 i, dmasrc, ctrl, ecc_ctrl, oob_ctrl, dmadst;
	struct lpc32xx_dmac_ll *dmalist_cur;
	struct lpc32xx_dmac_ll *dmalist_cur_ecc;
	/*
	 * CTRL descriptor entry for reading ECC
	 * Copy Multiple times to sync DMA with Flash Controller
	 */
	ecc_ctrl = 0x5 |
		DMAC_CHAN_SRC_BURST_1 |
		DMAC_CHAN_DEST_BURST_1 |
		DMAC_CHAN_SRC_WIDTH_32 |
		DMAC_CHAN_DEST_WIDTH_32 |
		DMAC_CHAN_DEST_AHB1;
	/* CTRL descriptor entry for reading/writing Data (one ECC step) */
	ctrl = (CFG_SYS_NAND_ECCSIZE / 4) |
		DMAC_CHAN_SRC_BURST_4 |
		DMAC_CHAN_DEST_BURST_4 |
		DMAC_CHAN_SRC_WIDTH_32 |
		DMAC_CHAN_DEST_WIDTH_32 |
		DMAC_CHAN_DEST_AHB1;
	/* CTRL descriptor entry for reading/writing Spare Area */
	oob_ctrl = (CONFIG_SYS_NAND_OOBSIZE / 4) |
		DMAC_CHAN_SRC_BURST_4 |
		DMAC_CHAN_DEST_BURST_4 |
		DMAC_CHAN_SRC_WIDTH_32 |
		DMAC_CHAN_DEST_WIDTH_32 |
		DMAC_CHAN_DEST_AHB1;
	/* Memory side auto-increments; the peripheral data port is fixed */
	if (read) {
		dmasrc = lpc32xx_dmac_set_dma_data();
		dmadst = (u32)buffer;
		ctrl |= DMAC_CHAN_DEST_AUTOINC;
	} else {
		dmadst = lpc32xx_dmac_set_dma_data();
		dmasrc = (u32)buffer;
		ctrl |= DMAC_CHAN_SRC_AUTOINC;
	}
	/*
	 * Write Operation Sequence for Small Block NAND
	 * ----------------------------------------------------------
	 * 1. X'fer 256 bytes of data from Memory to Flash.
	 * 2. Copy generated ECC data from Register to Spare Area
	 * 3. X'fer next 256 bytes of data from Memory to Flash.
	 * 4. Copy generated ECC data from Register to Spare Area.
	 * 5. X'fer 16 bytes of Spare area from Memory to Flash.
	 * Read Operation Sequence for Small Block NAND
	 * ----------------------------------------------------------
	 * 1. X'fer 256 bytes of data from Flash to Memory.
	 * 2. Copy generated ECC data from Register to ECC calc Buffer.
	 * 3. X'fer next 256 bytes of data from Flash to Memory.
	 * 4. Copy generated ECC data from Register to ECC calc Buffer.
	 * 5. X'fer 16 bytes of Spare area from Flash to Memory.
	 * Write Operation Sequence for Large Block NAND
	 * ----------------------------------------------------------
	 * 1. Steps(1-4) of Write Operations repeat for four times
	 * which generates 16 DMA descriptors to X'fer 2048 bytes of
	 * data & 32 bytes of ECC data.
	 * 2. X'fer 64 bytes of Spare area from Memory to Flash.
	 * Read Operation Sequence for Large Block NAND
	 * ----------------------------------------------------------
	 * 1. Steps(1-4) of Read Operations repeat for four times
	 * which generates 16 DMA descriptors to X'fer 2048 bytes of
	 * data & 32 bytes of ECC data.
	 * 2. X'fer 64 bytes of Spare area from Flash to Memory.
	 */
	for (i = 0; i < size/CFG_SYS_NAND_ECCSIZE; i++) {
		dmalist_cur = &dmalist[i * 2];
		dmalist_cur_ecc = &dmalist[(i * 2) + 1];
		dmalist_cur->dma_src = (read ? (dmasrc) : (dmasrc + (i*256)));
		dmalist_cur->dma_dest = (read ? (dmadst + (i*256)) : dmadst);
		dmalist_cur->next_lli = lpc32xx_dmac_next_lli(dmalist_cur_ecc);
		dmalist_cur->next_ctrl = ctrl;
		/* Snapshot the hardware ECC register after each data chunk */
		dmalist_cur_ecc->dma_src = lpc32xx_dmac_set_ecc();
		dmalist_cur_ecc->dma_dest = (u32)&ecc_buffer[i];
		dmalist_cur_ecc->next_lli =
			lpc32xx_dmac_next_lli(&dmalist[(i * 2) + 2]);
		dmalist_cur_ecc->next_ctrl = ecc_ctrl;
	}
	if (i) { /* Data only transfer: terminate the chain at the last ECC entry */
		dmalist_cur_ecc = &dmalist[(i * 2) - 1];
		dmalist_cur_ecc->next_lli = 0;
		dmalist_cur_ecc->next_ctrl |= DMAC_CHAN_INT_TC_EN;
		return;
	}
	/* OOB only transfer (size < one ECC step, loop above did not run) */
	if (read) {
		dmasrc = lpc32xx_dmac_set_dma_data();
		dmadst = (u32)buffer;
		oob_ctrl |= DMAC_CHAN_DEST_AUTOINC;
	} else {
		dmadst = lpc32xx_dmac_set_dma_data();
		dmasrc = (u32)buffer;
		oob_ctrl |= DMAC_CHAN_SRC_AUTOINC;
	}
	/* Read/Write Spare Area Data To/From Flash: single descriptor */
	dmalist_cur = &dmalist[i * 2];
	dmalist_cur->dma_src = dmasrc;
	dmalist_cur->dma_dest = dmadst;
	dmalist_cur->next_lli = 0;
	dmalist_cur->next_ctrl = (oob_ctrl | DMAC_CHAN_INT_TC_EN);
}
/*
 * Run one DMA transfer between memory and the SLC data port.
 * @buf:  memory buffer (source for writes, destination for reads)
 * @len:  transfer length in bytes
 * @read: non-zero for flash-to-memory direction
 *
 * Busy-waits for both NAND ready and DMA completion; on a DMA channel
 * start failure it calls BUG().
 */
static void lpc32xx_nand_xfer(struct mtd_info *mtd, const u8 *buf,
			      int len, int read)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	u32 config;
	int ret;
	/* DMA Channel Configuration */
	config = (read ? DMAC_CHAN_FLOW_D_P2M : DMAC_CHAN_FLOW_D_M2P) |
		(read ? DMAC_DEST_PERIP(0) : DMAC_DEST_PERIP(DMA_PERID_NAND1)) |
		(read ? DMAC_SRC_PERIP(DMA_PERID_NAND1) : DMAC_SRC_PERIP(0)) |
		DMAC_CHAN_ENABLE;
	/* Prepare DMA descriptors */
	lpc32xx_nand_dma_configure(chip, buf, len, read);
	/* Setup SLC controller and start transfer */
	if (read)
		setbits_le32(&lpc32xx_nand_slc_regs->cfg, CFG_DMA_DIR);
	else /* NAND_ECC_WRITE */
		clrbits_le32(&lpc32xx_nand_slc_regs->cfg, CFG_DMA_DIR);
	setbits_le32(&lpc32xx_nand_slc_regs->cfg, CFG_DMA_BURST);
	/*
	 * Write length for new transfers only (FIFO empty and no pending
	 * count).  A data transfer pre-loads the OOB length too, so the
	 * controller keeps the ECC session open for the following OOB pass.
	 */
	if (!((readl(&lpc32xx_nand_slc_regs->stat) & STAT_DMA_FIFO) |
	      readl(&lpc32xx_nand_slc_regs->tc))) {
		int tmp = (len != mtd->oobsize) ? mtd->oobsize : 0;
		writel(len + tmp, &lpc32xx_nand_slc_regs->tc);
	}
	setbits_le32(&lpc32xx_nand_slc_regs->ctrl, CTRL_DMA_START);
	/* Start DMA transfers */
	ret = lpc32xx_dma_start_xfer(dmachan, dmalist, config);
	if (unlikely(ret < 0))
		BUG();
	/* Wait for NAND to be ready */
	while (!lpc32xx_nand_dev_ready(mtd))
		;
	/* Wait till DMA transfer is DONE */
	if (lpc32xx_dma_wait_status(dmachan))
		pr_err("NAND DMA transfer error!\r\n");
	/* Stop DMA & HW ECC */
	clrbits_le32(&lpc32xx_nand_slc_regs->ctrl, CTRL_DMA_START);
	clrbits_le32(&lpc32xx_nand_slc_regs->cfg,
		     CFG_DMA_DIR | CFG_DMA_BURST | CFG_ECC_EN | CFG_DMA_ECC);
}
/*
 * Convert 'count' raw hardware ECC words into the 3-byte-per-step
 * on-flash format: invert, shift and store big-endian-style
 * (MSB first) into 'spare'.  Always returns 0.
 */
static u32 slc_ecc_copy_to_buffer(u8 *spare, const u32 *ecc, int count)
{
	int step;

	for (step = 0; step < count; step++) {
		u8 *dst = &spare[step * CFG_SYS_NAND_ECCBYTES];
		u32 code = ~(ecc[step] << 2) & 0xFFFFFF;

		dst[0] = (u8)(code >> 16);
		dst[1] = (u8)(code >> 8);
		dst[2] = (u8)(code & 0xFF);
	}
	return 0;
}
/*
 * ecc.calculate hook: the hardware ECC was already captured into
 * ecc_buffer[] by the DMA descriptor chain, so just convert it to the
 * on-flash 3-byte-per-step format.  'dat' is unused here.
 */
static int lpc32xx_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
				 uint8_t *ecc_code)
{
	return slc_ecc_copy_to_buffer(ecc_code, ecc_buffer, ECCSTEPS);
}
/*
 * Enables and prepares SLC NAND controller
 * for doing data transfers with H/W ECC enabled.
 * The 'mode' argument (NAND_ECC_READ/WRITE) is ignored: the same
 * setup serves both directions.
 */
static void lpc32xx_hwecc_enable(struct mtd_info *mtd, int mode)
{
	/* Clear ECC */
	writel(CTRL_ECC_CLEAR, &lpc32xx_nand_slc_regs->ctrl);
	/* Setup SLC controller for H/W ECC operations */
	setbits_le32(&lpc32xx_nand_slc_regs->cfg, CFG_ECC_EN | CFG_DMA_ECC);
}
/*
 * lpc32xx_correct_data - [NAND Interface] Detect and correct bit error(s)
 * mtd: MTD block structure
 * dat: raw data read from the chip
 * read_ecc: ECC from the chip
 * calc_ecc: the ECC calculated from raw data
 *
 * Detect and correct a 1 bit error for each 256 byte block.
 * Returns the total number of corrected bitflips, or -EBADMSG on an
 * uncorrectable step.
 */
int lpc32xx_correct_data(struct mtd_info *mtd, u_char *dat,
			 u_char *read_ecc, u_char *calc_ecc)
{
	unsigned int i;
	int ret1, ret2 = 0;
	u_char *r = read_ecc;
	u_char *c = calc_ecc;
	u16 data_offset = 0;

	for (i = 0 ; i < ECCSTEPS ; i++) {
		/*
		 * Correct the current 256-byte chunk first, THEN advance to
		 * the next ECC triplet / data chunk.  The previous code
		 * advanced before the call, which skipped the first chunk
		 * entirely and read one ECC triplet past the end of both
		 * read_ecc and calc_ecc on the last iteration.
		 */
		ret1 = nand_correct_data(mtd, dat + data_offset, r, c);
		if (ret1 < 0)
			return -EBADMSG;
		ret2 += ret1;

		r += CFG_SYS_NAND_ECCBYTES;
		c += CFG_SYS_NAND_ECCBYTES;
		data_offset += CFG_SYS_NAND_ECCSIZE;
	}

	return ret2;
}
/* read_buf hook (DMA build): transfer 'len' bytes from flash into 'buf' */
static void lpc32xx_dma_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	lpc32xx_nand_xfer(mtd, buf, len, 1);
}
/* write_buf hook (DMA build): transfer 'len' bytes from 'buf' to flash */
static void lpc32xx_dma_write_buf(struct mtd_info *mtd, const uint8_t *buf,
				  int len)
{
	lpc32xx_nand_xfer(mtd, buf, len, 0);
}
/*
 * Reuse the logic from "nand_read_page_hwecc()": DMA the data area (HW
 * ECC captured per step), DMA the OOB, extract the stored ECC and run
 * correction over the whole page in one call.  Returns the maximum
 * number of bitflips seen (0 on a clean read).
 */
static int lpc32xx_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				   uint8_t *buf, int oob_required, int page)
{
	int i;
	int stat;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	uint8_t *ecc_code = chip->buffers->ecccode;
	uint32_t *eccpos = chip->ecc.layout->eccpos;
	unsigned int max_bitflips = 0;
	/*
	 * As per the "LPC32x0 and LPC32x0/01 User manual" table 173 notes
	 * and section 9.7, the NAND SLC & DMA allowed single DMA transaction
	 * of a page size using DMA controller scatter/gather mode through
	 * linked list; the ECC read is done without any software intervention.
	 */
	lpc32xx_hwecc_enable(mtd, NAND_ECC_READ);
	lpc32xx_dma_read_buf(mtd, p, chip->ecc.size * chip->ecc.steps);
	lpc32xx_ecc_calculate(mtd, p, &ecc_calc[0]);
	lpc32xx_dma_read_buf(mtd, chip->oob_poi, mtd->oobsize);
	/* Pull the on-flash ECC bytes out of the OOB area */
	for (i = 0; i < chip->ecc.total; i++)
		ecc_code[i] = chip->oob_poi[eccpos[i]];
	/* Single correction pass over the whole page */
	stat = chip->ecc.correct(mtd, p, &ecc_code[0], &ecc_calc[0]);
	if (stat < 0)
		mtd->ecc_stats.failed++;
	else {
		mtd->ecc_stats.corrected += stat;
		max_bitflips = max_t(unsigned int, max_bitflips, stat);
	}
	return max_bitflips;
}
/*
 * Reuse the logic from "nand_write_page_hwecc()": DMA the data area out
 * (HW ECC captured per step), place the generated ECC bytes into the OOB
 * buffer per the layout, then DMA the OOB out.  Always returns 0.
 */
static int lpc32xx_write_page_hwecc(struct mtd_info *mtd,
				    struct nand_chip *chip,
				    const uint8_t *buf, int oob_required,
				    int page)
{
	int i;
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	const uint8_t *p = buf;
	uint32_t *eccpos = chip->ecc.layout->eccpos;
	/*
	 * As per the "LPC32x0 and LPC32x0/01 User manual" table 173 notes
	 * and section 9.7, the NAND SLC & DMA allowed single DMA transaction
	 * of a page size using DMA controller scatter/gather mode through
	 * linked list; the ECC read is done without any software intervention.
	 */
	lpc32xx_hwecc_enable(mtd, NAND_ECC_WRITE);
	lpc32xx_dma_write_buf(mtd, p, chip->ecc.size * chip->ecc.steps);
	lpc32xx_ecc_calculate(mtd, p, &ecc_calc[0]);
	/* Scatter the generated ECC bytes into the OOB per the layout */
	for (i = 0; i < chip->ecc.total; i++)
		chip->oob_poi[eccpos[i]] = ecc_calc[i];
	lpc32xx_dma_write_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}
#else
/* PIO read: one byte per 32-bit data-register access (non-DMA build) */
static void lpc32xx_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		buf[i] = readl(&lpc32xx_nand_slc_regs->data);
}
/* PIO write: one byte per 32-bit data-register access (non-DMA build) */
static void lpc32xx_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		writel(buf[i], &lpc32xx_nand_slc_regs->data);
}
#endif
/* Single-byte read through the 32-bit-aligned data register */
static uint8_t lpc32xx_read_byte(struct mtd_info *mtd)
{
	return readl(&lpc32xx_nand_slc_regs->data);
}
/* Single-byte write through the 32-bit-aligned data register */
static void lpc32xx_write_byte(struct mtd_info *mtd, uint8_t byte)
{
	writel(byte, &lpc32xx_nand_slc_regs->data);
}
/*
 * LPC32xx has only one SLC NAND controller, don't utilize
 * CONFIG_SYS_NAND_SELF_INIT to be able to reuse this function
 * both in SPL NAND and U-Boot images.
 *
 * Fills in the nand_chip callbacks (DMA/HW-ECC variant when the LPC32xx
 * DMA driver is built in, PIO/soft-ECC otherwise), programs the ECC
 * geometry and finally initializes the controller hardware.
 * Returns 0 on success, -1 if no DMA channel is available.
 */
int board_nand_init(struct nand_chip *lpc32xx_chip)
{
#if defined(CONFIG_DMA_LPC32XX) && !defined(CONFIG_XPL_BUILD)
	int ret;
	/* Acquire a channel for our use */
	ret = lpc32xx_dma_get_channel();
	if (unlikely(ret < 0)) {
		pr_info("Unable to get free DMA channel for NAND transfers\n");
		return -1;
	}
	dmachan = (unsigned int)ret;
#endif
	lpc32xx_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	lpc32xx_chip->dev_ready = lpc32xx_nand_dev_ready;
	/*
	 * The implementation of these functions is quite common, but
	 * they MUST be defined, because access to data register
	 * is strictly 32-bit aligned.
	 */
	lpc32xx_chip->read_byte = lpc32xx_read_byte;
	lpc32xx_chip->write_byte = lpc32xx_write_byte;
#if defined(CONFIG_DMA_LPC32XX) && !defined(CONFIG_XPL_BUILD)
	/* Hardware ECC calculation is supported when DMA driver is selected */
	lpc32xx_chip->ecc.mode = NAND_ECC_HW;
	lpc32xx_chip->read_buf = lpc32xx_dma_read_buf;
	lpc32xx_chip->write_buf = lpc32xx_dma_write_buf;
	lpc32xx_chip->ecc.calculate = lpc32xx_ecc_calculate;
	lpc32xx_chip->ecc.correct = lpc32xx_correct_data;
	lpc32xx_chip->ecc.hwctl = lpc32xx_hwecc_enable;
	lpc32xx_chip->chip_delay = 2000;
	lpc32xx_chip->ecc.read_page = lpc32xx_read_page_hwecc;
	lpc32xx_chip->ecc.write_page = lpc32xx_write_page_hwecc;
	lpc32xx_chip->options |= NAND_NO_SUBPAGE_WRITE;
#else
	/*
	 * Hardware ECC calculation is not supported by the driver,
	 * because it requires DMA support, see LPC32x0 User Manual,
	 * note after SLC_ECC register description (UM10326, p.198)
	 */
	lpc32xx_chip->ecc.mode = NAND_ECC_SOFT;
	/*
	 * The implementation of these functions is quite common, but
	 * they MUST be defined, because access to data register
	 * is strictly 32-bit aligned.
	 */
	lpc32xx_chip->read_buf = lpc32xx_read_buf;
	lpc32xx_chip->write_buf = lpc32xx_write_buf;
#endif
	/*
	 * These values are predefined
	 * for both small and large page NAND flash devices.
	 */
	lpc32xx_chip->ecc.size = CFG_SYS_NAND_ECCSIZE;
	lpc32xx_chip->ecc.bytes = CFG_SYS_NAND_ECCBYTES;
	lpc32xx_chip->ecc.strength = 1;
	/* Small-page devices use the fixed 16-byte OOB layout */
	if (CONFIG_SYS_NAND_PAGE_SIZE != NAND_LARGE_BLOCK_PAGE_SIZE)
		lpc32xx_chip->ecc.layout = &lpc32xx_nand_oob_16;
#if defined(CONFIG_SYS_NAND_USE_FLASH_BBT)
	lpc32xx_chip->bbt_options |= NAND_BBT_USE_FLASH;
#endif
	/* Initialize NAND interface */
	lpc32xx_nand_init();
	return 0;
}

View File

@ -1,602 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 Macronix International Co., Ltd.
*
* Author:
* Zhengxun Li <zhengxunli@mxic.com.tw>
*/
#include <clk.h>
#include <dm.h>
#include <malloc.h>
#include <nand.h>
#include <asm/io.h>
#include <asm/arch/hardware.h>
#include <dm/device_compat.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/iopoll.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/delay.h>
#define HC_CFG 0x0
#define HC_CFG_IF_CFG(x) ((x) << 27)
#define HC_CFG_DUAL_SLAVE BIT(31)
#define HC_CFG_INDIVIDUAL BIT(30)
#define HC_CFG_NIO(x) (((x) / 4) << 27)
#define HC_CFG_TYPE(s, t) ((t) << (23 + ((s) * 2)))
#define HC_CFG_TYPE_SPI_NOR 0
#define HC_CFG_TYPE_SPI_NAND 1
#define HC_CFG_TYPE_SPI_RAM 2
#define HC_CFG_TYPE_RAW_NAND 3
#define HC_CFG_SLV_ACT(x) ((x) << 21)
#define HC_CFG_CLK_PH_EN BIT(20)
#define HC_CFG_CLK_POL_INV BIT(19)
#define HC_CFG_BIG_ENDIAN BIT(18)
#define HC_CFG_DATA_PASS BIT(17)
#define HC_CFG_IDLE_SIO_LVL(x) ((x) << 16)
#define HC_CFG_MAN_START_EN BIT(3)
#define HC_CFG_MAN_START BIT(2)
#define HC_CFG_MAN_CS_EN BIT(1)
#define HC_CFG_MAN_CS_ASSERT BIT(0)
#define INT_STS 0x4
#define INT_STS_EN 0x8
#define INT_SIG_EN 0xc
#define INT_STS_ALL GENMASK(31, 0)
#define INT_RDY_PIN BIT(26)
#define INT_RDY_SR BIT(25)
#define INT_LNR_SUSP BIT(24)
#define INT_ECC_ERR BIT(17)
#define INT_CRC_ERR BIT(16)
#define INT_LWR_DIS BIT(12)
#define INT_LRD_DIS BIT(11)
#define INT_SDMA_INT BIT(10)
#define INT_DMA_FINISH BIT(9)
#define INT_RX_NOT_FULL BIT(3)
#define INT_RX_NOT_EMPTY BIT(2)
#define INT_TX_NOT_FULL BIT(1)
#define INT_TX_EMPTY BIT(0)
#define HC_EN 0x10
#define HC_EN_BIT BIT(0)
#define TXD(x) (0x14 + ((x) * 4))
#define RXD 0x24
#define SS_CTRL(s) (0x30 + ((s) * 4))
#define LRD_CFG 0x44
#define LWR_CFG 0x80
#define RWW_CFG 0x70
#define OP_READ BIT(23)
#define OP_DUMMY_CYC(x) ((x) << 17)
#define OP_ADDR_BYTES(x) ((x) << 14)
#define OP_CMD_BYTES(x) (((x) - 1) << 13)
#define OP_OCTA_CRC_EN BIT(12)
#define OP_DQS_EN BIT(11)
#define OP_ENHC_EN BIT(10)
#define OP_PREAMBLE_EN BIT(9)
#define OP_DATA_DDR BIT(8)
#define OP_DATA_BUSW(x) ((x) << 6)
#define OP_ADDR_DDR BIT(5)
#define OP_ADDR_BUSW(x) ((x) << 3)
#define OP_CMD_DDR BIT(2)
#define OP_CMD_BUSW(x) (x)
#define OP_BUSW_1 0
#define OP_BUSW_2 1
#define OP_BUSW_4 2
#define OP_BUSW_8 3
#define OCTA_CRC 0x38
#define OCTA_CRC_IN_EN(s) BIT(3 + ((s) * 16))
#define OCTA_CRC_CHUNK(s, x) ((fls((x) / 32)) << (1 + ((s) * 16)))
#define OCTA_CRC_OUT_EN(s) BIT(0 + ((s) * 16))
#define ONFI_DIN_CNT(s) (0x3c + (s))
#define LRD_CTRL 0x48
#define RWW_CTRL 0x74
#define LWR_CTRL 0x84
#define LMODE_EN BIT(31)
#define LMODE_SLV_ACT(x) ((x) << 21)
#define LMODE_CMD1(x) ((x) << 8)
#define LMODE_CMD0(x) (x)
#define LRD_ADDR 0x4c
#define LWR_ADDR 0x88
#define LRD_RANGE 0x50
#define LWR_RANGE 0x8c
#define AXI_SLV_ADDR 0x54
#define DMAC_RD_CFG 0x58
#define DMAC_WR_CFG 0x94
#define DMAC_CFG_PERIPH_EN BIT(31)
#define DMAC_CFG_ALLFLUSH_EN BIT(30)
#define DMAC_CFG_LASTFLUSH_EN BIT(29)
#define DMAC_CFG_QE(x) (((x) + 1) << 16)
#define DMAC_CFG_BURST_LEN(x) (((x) + 1) << 12)
#define DMAC_CFG_BURST_SZ(x) ((x) << 8)
#define DMAC_CFG_DIR_READ BIT(1)
#define DMAC_CFG_START BIT(0)
#define DMAC_RD_CNT 0x5c
#define DMAC_WR_CNT 0x98
#define SDMA_ADDR 0x60
#define DMAM_CFG 0x64
#define DMAM_CFG_START BIT(31)
#define DMAM_CFG_CONT BIT(30)
#define DMAM_CFG_SDMA_GAP(x) (fls((x) / 8192) << 2)
#define DMAM_CFG_DIR_READ BIT(1)
#define DMAM_CFG_EN BIT(0)
#define DMAM_CNT 0x68
#define LNR_TIMER_TH 0x6c
#define RDM_CFG0 0x78
#define RDM_CFG0_POLY(x) (x)
#define RDM_CFG1 0x7c
#define RDM_CFG1_RDM_EN BIT(31)
#define RDM_CFG1_SEED(x) (x)
#define LWR_SUSP_CTRL 0x90
#define LWR_SUSP_CTRL_EN BIT(31)
#define DMAS_CTRL 0x9c
#define DMAS_CTRL_EN BIT(31)
#define DMAS_CTRL_DIR_READ BIT(30)
#define DATA_STROB 0xa0
#define DATA_STROB_EDO_EN BIT(2)
#define DATA_STROB_INV_POL BIT(1)
#define DATA_STROB_DELAY_2CYC BIT(0)
#define IDLY_CODE(x) (0xa4 + ((x) * 4))
#define IDLY_CODE_VAL(x, v) ((v) << (((x) % 4) * 8))
#define GPIO 0xc4
#define GPIO_PT(x) BIT(3 + ((x) * 16))
#define GPIO_RESET(x) BIT(2 + ((x) * 16))
#define GPIO_HOLDB(x) BIT(1 + ((x) * 16))
#define GPIO_WPB(x) BIT((x) * 16)
#define HC_VER 0xd0
#define HW_TEST(x) (0xe0 + ((x) * 4))
#define MXIC_NFC_MAX_CLK_HZ 50000000
#define IRQ_TIMEOUT 1000
struct mxic_nand_ctrl {
struct clk *send_clk;
struct clk *send_dly_clk;
void __iomem *regs;
struct nand_chip nand_chip;
};
/*
* struct mxic_nfc_command_format - Defines NAND flash command format
* @start_cmd: First cycle command (Start command)
* @end_cmd: Second cycle command (Last command)
* @addr_len: Number of address cycles required to send the address
* @read: Direction of command
*/
struct mxic_nfc_command_format {
int start_cmd;
int end_cmd;
u8 addr_len;
bool read;
};
/* The NAND flash operations command format */
static const struct mxic_nfc_command_format mxic_nand_commands[] = {
{NAND_CMD_READ0, NAND_CMD_READSTART, 5, 1 },
{NAND_CMD_RNDOUT, NAND_CMD_RNDOUTSTART, 2, 1 },
{NAND_CMD_READID, NAND_CMD_NONE, 1, 1 },
{NAND_CMD_STATUS, NAND_CMD_NONE, 0, 1 },
{NAND_CMD_SEQIN, NAND_CMD_NONE, 5, 0 },
{NAND_CMD_PAGEPROG, NAND_CMD_NONE, 0, 0 },
{NAND_CMD_CACHEDPROG, NAND_CMD_NONE, 0, 0 },
{NAND_CMD_RNDIN, NAND_CMD_NONE, 2, 0 },
{NAND_CMD_ERASE1, NAND_CMD_NONE, 3, 0 },
{NAND_CMD_ERASE2, NAND_CMD_NONE, 0, 0 },
{NAND_CMD_RESET, NAND_CMD_NONE, 0, 0 },
{NAND_CMD_PARAM, NAND_CMD_NONE, 1, 1 },
{NAND_CMD_GET_FEATURES, NAND_CMD_NONE, 1, 1 },
{NAND_CMD_SET_FEATURES, NAND_CMD_NONE, 1, 0 },
{NAND_CMD_NONE, NAND_CMD_NONE, 0, 0 },
};
/*
 * Ungate both controller clocks; on failure of the second clock the
 * first is rolled back so no clock is left enabled on error.
 */
static int mxic_nfc_clk_enable(struct mxic_nand_ctrl *nfc)
{
	int ret;

	ret = clk_prepare_enable(nfc->send_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(nfc->send_dly_clk);
	if (ret)
		clk_disable_unprepare(nfc->send_clk);

	return ret;
}
/* Gate both controller clocks (send and send-delay) */
static void mxic_nfc_clk_disable(struct mxic_nand_ctrl *nfc)
{
	clk_disable_unprepare(nfc->send_clk);
	clk_disable_unprepare(nfc->send_dly_clk);
}
/*
 * Program the same input-delay code into all eight delay slots
 * (IDLY_CODE registers 0 and 1, four 8-bit fields each).
 */
static void mxic_nfc_set_input_delay(struct mxic_nand_ctrl *nfc, u8 idly_code)
{
	writel(IDLY_CODE_VAL(0, idly_code) |
	       IDLY_CODE_VAL(1, idly_code) |
	       IDLY_CODE_VAL(2, idly_code) |
	       IDLY_CODE_VAL(3, idly_code),
	       nfc->regs + IDLY_CODE(0));
	writel(IDLY_CODE_VAL(4, idly_code) |
	       IDLY_CODE_VAL(5, idly_code) |
	       IDLY_CODE_VAL(6, idly_code) |
	       IDLY_CODE_VAL(7, idly_code),
	       nfc->regs + IDLY_CODE(1));
}
/*
 * Set both clocks to 'freq' and pick a fixed mid-range input delay.
 * Returns 0 on success or the first clk_set_rate() error.
 */
static int mxic_nfc_clk_setup(struct mxic_nand_ctrl *nfc, unsigned long freq)
{
	int ret = clk_set_rate(nfc->send_clk, freq);

	if (!ret)
		ret = clk_set_rate(nfc->send_dly_clk, freq);
	if (ret)
		return ret;

	/*
	 * A constant delay range from 0x0 ~ 0x1F for input delay,
	 * the unit is 78 ps, the max input delay is 2.418 ns.
	 */
	mxic_nfc_set_input_delay(nfc, 0xf);

	return 0;
}
/*
 * Re-clock the controller: clamp to the controller maximum, gate the
 * clocks, apply the new rate/delay and ungate again.
 */
static int mxic_nfc_set_freq(struct mxic_nand_ctrl *nfc, unsigned long freq)
{
	int ret;

	if (freq > MXIC_NFC_MAX_CLK_HZ)
		freq = MXIC_NFC_MAX_CLK_HZ;

	mxic_nfc_clk_disable(nfc);

	ret = mxic_nfc_clk_setup(nfc, freq);
	if (!ret)
		ret = mxic_nfc_clk_enable(nfc);

	return ret;
}
/*
 * One-time controller setup: 8-bit raw-NAND interface on slave 0 with
 * manual chip-select, all interrupt status bits enabled, the ready-pin
 * interrupt signal enabled, linear read mode cleared, and HC_EN written
 * to 0 (host controller disabled).
 */
static void mxic_nfc_hw_init(struct mxic_nand_ctrl *nfc)
{
	writel(HC_CFG_NIO(8) | HC_CFG_TYPE(1, HC_CFG_TYPE_RAW_NAND) |
	       HC_CFG_SLV_ACT(0) | HC_CFG_MAN_CS_EN |
	       HC_CFG_IDLE_SIO_LVL(1), nfc->regs + HC_CFG);
	writel(INT_STS_ALL, nfc->regs + INT_STS_EN);
	writel(INT_RDY_PIN, nfc->regs + INT_SIG_EN);
	writel(0x0, nfc->regs + ONFI_DIN_CNT(0));
	writel(0, nfc->regs + LRD_CFG);
	writel(0, nfc->regs + LRD_CTRL);
	writel(0x0, nfc->regs + HC_EN);
}
/*
 * Assert the chip-select line under software control.  Two separate
 * read-modify-writes: first make sure manual-CS mode is enabled, then
 * assert the CS bit.
 */
static void mxic_nfc_cs_enable(struct mxic_nand_ctrl *nfc)
{
	writel(readl(nfc->regs + HC_CFG) | HC_CFG_MAN_CS_EN,
	       nfc->regs + HC_CFG);
	writel(HC_CFG_MAN_CS_ASSERT | readl(nfc->regs + HC_CFG),
	       nfc->regs + HC_CFG);
}

/* De-assert the manually controlled chip-select line. */
static void mxic_nfc_cs_disable(struct mxic_nand_ctrl *nfc)
{
	writel(~HC_CFG_MAN_CS_ASSERT & readl(nfc->regs + HC_CFG),
	       nfc->regs + HC_CFG);
}
/*
 * PIO transfer of @len bytes through the controller FIFO, up to 4 bytes
 * per word.  Each word is a full-duplex exchange: @txbuf bytes (padded
 * with 0xff when shorter than a word, or when @txbuf is NULL) are pushed
 * to the TX FIFO, and the word clocked back in is read from the RX FIFO
 * and stored to @rxbuf when that is non-NULL.
 *
 * Returns 0 on success or a negative error on FIFO-poll timeout.
 */
static int mxic_nfc_data_xfer(struct mxic_nand_ctrl *nfc, const void *txbuf,
			      void *rxbuf, unsigned int len)
{
	unsigned int pos = 0;

	while (pos < len) {
		unsigned int nbytes = len - pos;
		u32 data = 0xffffffff;	/* bus idle / padding pattern */
		u32 sts;
		int ret;

		if (nbytes > 4)
			nbytes = 4;

		if (txbuf)
			memcpy(&data, txbuf + pos, nbytes);

		/* Wait for room in the TX FIFO before pushing the word */
		ret = readl_poll_timeout(nfc->regs + INT_STS, sts,
					 sts & INT_TX_EMPTY, 1000000);
		if (ret)
			return ret;

		writel(data, nfc->regs + TXD(nbytes % 4));

		/* Wait until the word has been fully shifted out ... */
		ret = readl_poll_timeout(nfc->regs + INT_STS, sts,
					 sts & INT_TX_EMPTY, 1000000);
		if (ret)
			return ret;

		/* ... and for the matching receive word to arrive */
		ret = readl_poll_timeout(nfc->regs + INT_STS, sts,
					 sts & INT_RX_NOT_EMPTY, 1000000);
		if (ret)
			return ret;

		data = readl(nfc->regs + RXD);
		if (rxbuf) {
			/* Valid bytes sit in the top of the 32-bit word */
			data >>= (8 * (4 - nbytes));
			memcpy(rxbuf + pos, &data, nbytes);
		}
		/* The RX FIFO must be drained after every word */
		WARN_ON(readl(nfc->regs + INT_STS) & INT_RX_NOT_EMPTY);

		pos += nbytes;
	}

	return 0;
}
/*
 * nand_chip->read_byte hook: clock one byte in from the device.
 *
 * Fix: @data was previously uninitialized, so a failed/timed-out FIFO
 * transfer returned an indeterminate value (undefined behavior).  It is
 * now preset to 0xff, the idle bus pattern, which is also what an
 * unresponsive NAND bus would read as.
 */
static uint8_t mxic_nfc_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxic_nand_ctrl *nfc = nand_get_controller_data(chip);
	u8 data = 0xff;

	writel(0x0, nfc->regs + ONFI_DIN_CNT(0));
	writel(OP_DATA_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F) |
	       OP_READ, nfc->regs + SS_CTRL(0));
	mxic_nfc_data_xfer(nfc, NULL, &data, 1);

	return data;
}
/* nand_chip->read_buf hook: clock @rlen bytes in from the device. */
static void mxic_nfc_read_buf(struct mtd_info *mtd, uint8_t *rxbuf, int rlen)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxic_nand_ctrl *nfc = nand_get_controller_data(chip);

	writel(0x0, nfc->regs + ONFI_DIN_CNT(0));
	writel(OP_DATA_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F) |
	       OP_READ, nfc->regs + SS_CTRL(0));
	/* NOTE(review): the transfer's error return is ignored here */
	mxic_nfc_data_xfer(nfc, NULL, rxbuf, rlen);
}

/* nand_chip->write_buf hook: clock @wlen bytes out to the device. */
static void mxic_nfc_write_buf(struct mtd_info *mtd, const uint8_t *txbuf,
			       int wlen)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxic_nand_ctrl *nfc = nand_get_controller_data(chip);

	/* Program the exact byte count for this output data phase */
	writel(wlen, nfc->regs + ONFI_DIN_CNT(0));
	writel(OP_DATA_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F),
	       nfc->regs + SS_CTRL(0));
	mxic_nfc_data_xfer(nfc, txbuf, NULL, wlen);
}
/*
 * nand_chip->cmdfunc hook: issue @command with optional @column /
 * @page_addr cycles, using the per-command format table
 * mxic_nand_commands to decide address length and a possible second
 * opcode.  CS handling: CS is kept asserted across a command pair
 * (e.g. SEQIN...PAGEPROG, ERASE1...ERASE2) and released after the
 * closing command or RESET.
 */
static void mxic_nfc_cmd_function(struct mtd_info *mtd, unsigned int command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxic_nand_ctrl *nfc = nand_get_controller_data(chip);
	const struct mxic_nfc_command_format *cmd = NULL;
	u32 sts;
	u8 index, addr[5];

	/* Emulate NAND_CMD_READOOB */
	if (command == NAND_CMD_READOOB) {
		column += mtd->writesize;
		command = NAND_CMD_READ0;
	}

	/* Get the command format */
	for (index = 0; index < ARRAY_SIZE(mxic_nand_commands); index++)
		if (command == mxic_nand_commands[index].start_cmd)
			break;

	/*
	 * NOTE(review): if @command is absent from the table, index equals
	 * ARRAY_SIZE() here and this reads one past the end of the array --
	 * presumably the table covers every command the core can issue;
	 * verify against mxic_nand_commands.
	 */
	cmd = &mxic_nand_commands[index];

	/* Keep CS asserted for the closing half of a command pair */
	if (!(command == NAND_CMD_PAGEPROG ||
	      command == NAND_CMD_CACHEDPROG ||
	      command == NAND_CMD_ERASE2))
		mxic_nfc_cs_disable(nfc);

	mxic_nfc_cs_enable(nfc);

	/* Build the address cycles: column (2 bytes) then row (3 bytes) */
	if (column != -1) {
		addr[0] = column;
		addr[1] = column >> 8;
		if (page_addr != -1) {
			addr[2] = page_addr;
			addr[3] = page_addr >> 8;
			addr[4] = page_addr >> 16;
		}
	} else if (page_addr != -1) {
		addr[0] = page_addr;
		addr[1] = page_addr >> 8;
		addr[2] = page_addr >> 16;
	}

	/* Pulse HC_EN, then send the opening opcode */
	writel(0, nfc->regs + HC_EN);
	writel(HC_EN_BIT, nfc->regs + HC_EN);
	writel(OP_CMD_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F) | OP_CMD_BYTES(0),
	       nfc->regs + SS_CTRL(0));
	mxic_nfc_data_xfer(nfc, &cmd->start_cmd, NULL, 1);

	/* Address cycles, if the command format requires any */
	if (cmd->addr_len) {
		writel(OP_ADDR_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F) |
		       OP_ADDR_BYTES(cmd->addr_len), nfc->regs + SS_CTRL(0));
		mxic_nfc_data_xfer(nfc, &addr, NULL, cmd->addr_len);
	}

	/* Optional trailing opcode (e.g. READSTART) */
	if (cmd->end_cmd != NAND_CMD_NONE) {
		writel(0, nfc->regs + HC_EN);
		writel(HC_EN_BIT, nfc->regs + HC_EN);
		writel(OP_CMD_BUSW(OP_BUSW_8) | OP_DUMMY_CYC(0x3F) |
		       OP_CMD_BYTES(0), nfc->regs + SS_CTRL(0));
		mxic_nfc_data_xfer(nfc, &cmd->end_cmd, NULL, 1);
	}

	/* Wait (best-effort, result ignored) for the ready pin */
	readl_poll_timeout(nfc->regs + INT_STS, sts, sts & INT_RDY_PIN,
			   1000000);

	/* Release CS after a closing command or a reset */
	if (command == NAND_CMD_PAGEPROG ||
	    command == NAND_CMD_CACHEDPROG ||
	    command == NAND_CMD_ERASE2 ||
	    command == NAND_CMD_RESET) {
		mxic_nfc_cs_disable(nfc);
	}
}
static int mxic_nfc_setup_data_interface(struct mtd_info *mtd, int chipnr,
const struct nand_data_interface *conf)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mxic_nand_ctrl *nfc = nand_get_controller_data(chip);
const struct nand_sdr_timings *sdr;
unsigned long freq;
int ret;
sdr = nand_get_sdr_timings(conf);
if (IS_ERR(sdr))
return PTR_ERR(sdr);
if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
freq = 1000000000 / (sdr->tRC_min / 1000);
ret = mxic_nfc_set_freq(nfc, freq);
if (ret)
WARN_ON("Set freq failed\n");
if (sdr->tRC_min < 30000)
writel(DATA_STROB_EDO_EN, nfc->regs + DATA_STROB);
return 0;
}
/*
 * nand_chip->select_chip hook.  Only a single chip select is wired up,
 * so chip 0 and the "deselect" value -1 are accepted as no-ops and any
 * other number indicates a core bug.
 */
static void mxic_nfc_select_chip(struct mtd_info *mtd, int chipnr)
{
	if (chipnr != -1 && chipnr != 0)
		BUG();
}
static int mxic_nfc_probe(struct udevice *dev)
{
struct mxic_nand_ctrl *nfc = dev_get_priv(dev);
struct nand_chip *nand_chip = &nfc->nand_chip;
struct mtd_info *mtd;
ofnode child;
int err;
nfc->regs = dev_read_addr_ptr(dev);
nfc->send_clk = devm_clk_get(dev, "send");
if (IS_ERR(nfc->send_clk))
return PTR_ERR(nfc->send_clk);
nfc->send_dly_clk = devm_clk_get(dev, "send_dly");
if (IS_ERR(nfc->send_dly_clk))
return PTR_ERR(nfc->send_dly_clk);
mtd = nand_to_mtd(nand_chip);
ofnode_for_each_subnode(child, dev_ofnode(dev))
nand_set_flash_node(nand_chip, child);
nand_set_controller_data(nand_chip, nfc);
nand_chip->select_chip = mxic_nfc_select_chip;
nand_chip->setup_data_interface = mxic_nfc_setup_data_interface;
nand_chip->cmdfunc = mxic_nfc_cmd_function;
nand_chip->read_byte = mxic_nfc_read_byte;
nand_chip->read_buf = mxic_nfc_read_buf;
nand_chip->write_buf = mxic_nfc_write_buf;
mxic_nfc_hw_init(nfc);
err = nand_scan(mtd, 1);
if (err)
return err;
err = nand_register(0, mtd);
if (err) {
dev_err(dev, "Failed to register MTD: %d\n", err);
return err;
}
return 0;
}
/* Device-tree match table */
static const struct udevice_id mxic_nfc_of_ids[] = {
	{ .compatible = "mxic,multi-itfc-v009-nand-controller" },
	{ /* Sentinel */ }
};

/* Driver-model registration; priv is the controller state */
U_BOOT_DRIVER(mxic_nfc) = {
	.name = "mxic_nfc",
	.id = UCLASS_MTD,
	.of_match = mxic_nfc_of_ids,
	.probe = mxic_nfc_probe,
	.priv_auto = sizeof(struct mxic_nand_ctrl),
};
void board_nand_init(void)
{
struct udevice *dev;
int ret;
ret = uclass_get_device_by_driver(UCLASS_MTD,
DM_DRIVER_GET(mxic_nfc), &dev);
if (ret && ret != -ENODEV)
pr_err("Failed to initialize %s. (error %d)\n", dev->name,
ret);
}

View File

@ -1640,10 +1640,12 @@ int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info)
nand->setup_data_interface = mxs_nand_setup_interface; nand->setup_data_interface = mxs_nand_setup_interface;
/* first scan to find the device and get the page size */ /* first scan to find the device and get the page size */
if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL)) err = nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL);
if (err)
goto err_free_buffers; goto err_free_buffers;
if (mxs_nand_setup_ecc(mtd)) err = mxs_nand_setup_ecc(mtd);
if (err)
goto err_free_buffers; goto err_free_buffers;
nand->ecc.read_page = mxs_nand_ecc_read_page; nand->ecc.read_page = mxs_nand_ecc_read_page;

View File

@ -1,422 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Marvell International Ltd.
*/
#include <dm.h>
#include <dm/of_access.h>
#include <malloc.h>
#include <memalign.h>
#include <nand.h>
#include <pci.h>
#include <pci_ids.h>
#include <time.h>
#include <linux/bitfield.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/libfdt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand_bch.h>
#include <linux/mtd/nand_ecc.h>
#include <asm/io.h>
#include <asm/types.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clock.h>
#include "octeontx_bch.h"
static LIST_HEAD(octeontx_bch_devices);
static unsigned int num_vfs = BCH_NR_VF;
static void *bch_pf;
static void *bch_vf;
static void *token;
static bool bch_pf_initialized;
static bool bch_vf_initialized;
/*
 * Thin wrapper around pci_sriov_init() that logs a failure before
 * passing the error back to the caller.
 */
static int pci_enable_sriov(struct udevice *dev, int nr_virtfn)
{
	int ret = pci_sriov_init(dev, nr_virtfn);

	if (ret)
		printf("%s(%s): pci_sriov_init returned %d\n", __func__,
		       dev->name, ret);

	return ret;
}
/* Return the BCH VF handle, or NULL until both PF and VF are set up. */
void *octeontx_bch_getv(void)
{
	if (bch_vf && bch_vf_initialized && bch_pf_initialized)
		return bch_vf;

	return NULL;
}

/* Install (or clear, by passing NULL) the BCH VF handle. */
void octeontx_bch_putv(void *token)
{
	bch_vf_initialized = token != NULL;
	bch_vf = token;
}

/* Return the opaque token recorded for the PF. */
void *octeontx_bch_getp(void)
{
	return token;
}

/* Install (or clear, by passing NULL) the BCH PF handle. */
void octeontx_bch_putp(void *token)
{
	bch_pf = token;
	bch_pf_initialized = token != NULL;
}
/* Placeholder for further device-level initialisation; nothing yet. */
static int do_bch_init(struct bch_device *bch)
{
	return 0;
}

/* Soft-reset the BCH engine and give it 2 ms to settle. */
static void bch_reset(struct bch_device *bch)
{
	writeq(1, bch->reg_base + BCH_CTL);
	mdelay(2);
}

/* Mask and acknowledge all error interrupts, then reset the engine. */
static void bch_disable(struct bch_device *bch)
{
	writeq(~0ull, bch->reg_base + BCH_ERR_INT_ENA_W1C);
	writeq(~0ull, bch->reg_base + BCH_ERR_INT);
	bch_reset(bch);
}

/* Read the built-in self-test result register (non-zero = failure). */
static u32 bch_check_bist_status(struct bch_device *bch)
{
	return readq(bch->reg_base + BCH_BIST_RESULT);
}
/*
 * One-time PF initialisation: reset the engine, verify BIST, clamp the
 * requested VF count to what the hardware supports.
 *
 * Fix: dev_err()/dev_warn() referenced an undeclared identifier `dev`;
 * the device handle lives in bch->dev.
 *
 * Returns 0 on success, -ENODEV on BIST failure.
 */
static int bch_device_init(struct bch_device *bch)
{
	u64 bist;
	int rc;

	debug("%s: Resetting...\n", __func__);
	/* Reset the PF when probed first */
	bch_reset(bch);

	debug("%s: Checking BIST...\n", __func__);
	/* Check BIST status */
	bist = (u64)bch_check_bist_status(bch);
	if (bist) {
		dev_err(bch->dev, "BCH BIST failed with code 0x%llx\n", bist);
		return -ENODEV;
	}

	/* Get max VQs/VFs supported by the device */
	bch->max_vfs = pci_sriov_get_totalvfs(bch->dev);
	debug("%s: %d vfs\n", __func__, bch->max_vfs);
	if (num_vfs > bch->max_vfs) {
		dev_warn(bch->dev,
			 "Num of VFs to enable %d is greater than max available. Enabling %d VFs.\n",
			 num_vfs, bch->max_vfs);
		num_vfs = bch->max_vfs;
	}
	bch->vfs_enabled = bch->max_vfs;
	/* Get number of VQs/VFs to be enabled */
	/* TODO: Get CLK frequency */
	/* Reset device parameters */

	debug("%s: Doing initialization\n", __func__);
	rc = do_bch_init(bch);

	return rc;
}
/*
 * Enable @numvfs virtual functions on the BCH PF.
 *
 * Returns the number of VFs enabled on success, 0 when @numvfs is 0,
 * -EBUSY while any VF is in use, or a pci_enable_sriov() error code.
 */
static int bch_sriov_configure(struct udevice *dev, int numvfs)
{
	struct bch_device *bch = dev_get_priv(dev);
	int ret = -EBUSY;

	debug("%s(%s, %d), bch: %p, vfs_in_use: %d, enabled: %d\n", __func__,
	      dev->name, numvfs, bch, bch->vfs_in_use, bch->vfs_enabled);

	/* Refuse to reconfigure while any VF is active */
	if (bch->vfs_in_use)
		goto exit;

	ret = 0;

	if (numvfs > 0) {
		debug("%s: Enabling sriov\n", __func__);
		ret = pci_enable_sriov(dev, numvfs);
		if (ret == 0) {
			bch->flags |= BCH_FLAG_SRIOV_ENABLED;
			/* Report the enabled VF count back to the caller */
			ret = numvfs;
			bch->vfs_enabled = numvfs;
		}
	}

	debug("VFs enabled: %d\n", ret);
exit:
	debug("%s: Returning %d\n", __func__, ret);
	return ret;
}
/*
 * PF probe: map BAR0, reset and initialise the engine, add the device
 * to the global list, record the PF handles and enable SR-IOV so the
 * VF can subsequently be probed.
 */
static int octeontx_pci_bchpf_probe(struct udevice *dev)
{
	struct bch_device *bch;
	int ret;

	debug("%s(%s)\n", __func__, dev->name);
	bch = dev_get_priv(dev);
	if (!bch)
		return -ENOMEM;

	bch->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0,
				       PCI_REGION_TYPE, PCI_REGION_MEM);
	bch->dev = dev;
	debug("%s: base address: %p\n", __func__, bch->reg_base);

	ret = bch_device_init(bch);
	if (ret) {
		printf("%s(%s): init returned %d\n", __func__, dev->name, ret);
		return ret;
	}
	INIT_LIST_HEAD(&bch->list);
	list_add(&bch->list, &octeontx_bch_devices);
	/* Publish the PF udevice as the token seen by octeontx_bch_getp() */
	token = (void *)dev;

	debug("%s: Configuring SRIOV\n", __func__);
	bch_sriov_configure(dev, num_vfs);
	debug("%s: Done.\n", __func__);
	/* Record the bch_device as the PF handle and mark PF initialised */
	octeontx_bch_putp(bch);

	return 0;
}
/* PCI match table for the BCH physical function */
static const struct pci_device_id octeontx_bchpf_pci_id_table[] = {
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BCH) },
	{},
};

/* PCI match table for the BCH virtual function */
static const struct pci_device_id octeontx_bchvf_pci_id_table[] = {
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BCHVF)},
	{},
};
/**
 * Queue one ECC-generation command on the BCH engine.
 *
 * @param vf virtual function handle
 * @param[in] block 8-byte aligned bus address of the data to protect
 * @param block_size Size of block in bytes, must be a multiple of two.
 * @param bch_level Number of correctable errors; the parity size is
 * ((15 * bch_level) + 7) / 8 bytes.
 * Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
 * @param[out] ecc 8-byte aligned bus address receiving the ECC bytes
 * @param[in] resp bus address where the response word is written
 *
 * Return: Zero on success, negative on failure.
 */
int octeontx_bch_encode(struct bch_vf *vf, dma_addr_t block, u16 block_size,
			u8 bch_level, dma_addr_t ecc, dma_addr_t resp)
{
	union bch_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));

	/* Control word: generate ECC over block_size bytes at bch_level */
	cmd.s.cword.size = block_size;
	cmd.s.cword.ecc_level = bch_level;
	cmd.s.cword.ecc_gen = eg_gen;

	/* Input, output and response bus addresses */
	cmd.s.iword.ptr = block;
	cmd.s.oword.ptr = ecc;
	cmd.s.rword.ptr = resp;

	if (octeontx_cmd_queue_write(QID_BCH, 1,
				     sizeof(cmd) / sizeof(uint64_t), cmd.u))
		return -1;

	octeontx_bch_write_doorbell(1, vf);

	return 0;
}
/**
 * Queue one error-correction command on the BCH engine.
 *
 * @param vf virtual function handle
 * @param[in] block_ecc_in 8-byte aligned bus address of data with the
 * ECC bytes concatenated at the end
 * @param block_size Size of block in bytes, must be a multiple of
 * two.
 * @param bch_level Number of correctable errors; the parity size
 * is ((15 * bch_level) + 7) / 8 bytes.
 * Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
 * @param[out] block_out 8-byte aligned bus address for the corrected
 * data; must differ from block_ecc_in.
 * @param[in] resp bus address where the response word is written
 *
 * Return: Zero on success, negative on failure.
 */
int octeontx_bch_decode(struct bch_vf *vf, dma_addr_t block_ecc_in,
			u16 block_size, u8 bch_level,
			dma_addr_t block_out, dma_addr_t resp)
{
	union bch_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));

	/* Control word: correct block_size bytes at bch_level */
	cmd.s.cword.size = block_size;
	cmd.s.cword.ecc_level = bch_level;
	cmd.s.cword.ecc_gen = eg_correct;

	/* Input, output and response bus addresses */
	cmd.s.iword.ptr = block_ecc_in;
	cmd.s.oword.ptr = block_out;
	cmd.s.rword.ptr = resp;

	if (octeontx_cmd_queue_write(QID_BCH, 1,
				     sizeof(cmd) / sizeof(uint64_t), cmd.u))
		return -1;

	octeontx_bch_write_doorbell(1, vf);

	return 0;
}
EXPORT_SYMBOL(octeontx_bch_decode);
/*
 * Poll the response word until the hardware sets its done bit, for at
 * most 10 timer ticks.  Returns 0 when done, -ETIMEDOUT otherwise.
 */
int octeontx_bch_wait(struct bch_vf *vf, union bch_resp *resp,
		      dma_addr_t handle)
{
	ulong start = get_timer(0);

	do
		__iormb();	/* HW is updating *resp */
	while (!resp->s.done && get_timer(start) < 10);

	return resp->s.done ? 0 : -ETIMEDOUT;
}
/* One command-queue descriptor per queue ID */
struct bch_q octeontx_bch_q[QID_MAX];

/*
 * Allocate the DMA command-queue storage for @queue_id and link its
 * NQS chunks into a ring.  @max_depth must be 2^N - 1 (slots per chunk
 * minus the link word).  @fpa_pool is unused here; @pool_size is stored
 * in pool_size_m1 for later use.
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ENOMEM on
 * allocation failure.
 */
static int octeontx_cmd_queue_initialize(struct udevice *dev, int queue_id,
					 int max_depth, int fpa_pool,
					 int pool_size)
{
	/* some params are for later merge with CPT or cn83xx */
	struct bch_q *q = &octeontx_bch_q[queue_id];
	unsigned long paddr;
	u64 *chunk_buffer;
	int chunk = max_depth + 1;	/* u64 slots per chunk incl. link word */
	int i, size;

	if ((unsigned int)queue_id >= QID_MAX)
		return -EINVAL;
	if (max_depth & chunk) /* must be 2^N - 1 */
		return -EINVAL;

	size = NQS * chunk * sizeof(u64);
	chunk_buffer = dma_alloc_coherent(size, &paddr);
	if (!chunk_buffer)
		return -ENOMEM;

	q->base_paddr = paddr;
	q->dev = dev;
	q->index = 0;
	q->max_depth = max_depth;
	q->pool_size_m1 = pool_size;
	q->base_vaddr = chunk_buffer;

	/*
	 * Ring linkage: the last u64 of chunk i holds the bus address of
	 * chunk (i + 1) % NQS, so hardware can follow the chain.
	 */
	for (i = 0; i < NQS; i++) {
		u64 *ixp;
		int inext = (i + 1) * chunk - 1;
		int j = (i + 1) % NQS;
		int jnext = j * chunk;
		dma_addr_t jbase = q->base_paddr + jnext * sizeof(u64);

		ixp = &chunk_buffer[inext];
		*ixp = jbase;
	}

	return 0;
}
/*
 * VF probe: map BAR0, set up the command queue and program the queue
 * registers, then publish the VF handle and kick the deferred NAND
 * probe that was waiting for BCH to become available.
 */
static int octeontx_pci_bchvf_probe(struct udevice *dev)
{
	struct bch_vf *vf;
	union bch_vqx_ctl ctl;
	union bch_vqx_cmd_buf cbuf;
	int err;

	debug("%s(%s)\n", __func__, dev->name);

	vf = dev_get_priv(dev);
	if (!vf)
		return -ENOMEM;

	vf->dev = dev;

	/* Map PF's configuration registers */
	vf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0,
				      PCI_REGION_TYPE, PCI_REGION_MEM);
	debug("%s: reg base: %p\n", __func__, vf->reg_base);

	err = octeontx_cmd_queue_initialize(dev, QID_BCH, QDEPTH - 1, 0,
					    sizeof(union bch_cmd) * QDEPTH);
	if (err) {
		dev_err(dev, "octeontx_cmd_queue_initialize() failed\n");
		goto release;
	}

	/* Command buffer: write-back/don't-fill hints, QDEPTH-word chunks */
	ctl.u = readq(vf->reg_base + BCH_VQX_CTL(0));

	cbuf.u = 0;
	cbuf.s.ldwb = 1;
	cbuf.s.dfb = 1;
	cbuf.s.size = QDEPTH;
	writeq(cbuf.u, vf->reg_base + BCH_VQX_CMD_BUF(0));

	writeq(ctl.u, vf->reg_base + BCH_VQX_CTL(0));

	/* Point the hardware at the first command-queue chunk */
	writeq(octeontx_bch_q[QID_BCH].base_paddr,
	       vf->reg_base + BCH_VQX_CMD_PTR(0));

	octeontx_bch_putv(vf);

	debug("%s: bch vf initialization complete\n", __func__);

	if (octeontx_bch_getv())
		return octeontx_pci_nand_deferred_probe();

	return -1;

release:
	return err;
}
/* PF remove hook: quiesce the BCH engine before handing off to the OS. */
static int octeontx_pci_bchpf_remove(struct udevice *dev)
{
	bch_disable(dev_get_priv(dev));

	return 0;
}
/* PF driver; DM_FLAG_OS_PREPARE runs remove() to quiesce before OS boot */
U_BOOT_DRIVER(octeontx_pci_bchpf) = {
	.name = BCHPF_DRIVER_NAME,
	.id = UCLASS_MISC,
	.probe = octeontx_pci_bchpf_probe,
	.remove = octeontx_pci_bchpf_remove,
	.priv_auto = sizeof(struct bch_device),
	.flags = DM_FLAG_OS_PREPARE,
};

/* VF driver: owns the command queue actually used for BCH requests */
U_BOOT_DRIVER(octeontx_pci_bchvf) = {
	.name = BCHVF_DRIVER_NAME,
	.id = UCLASS_MISC,
	.probe = octeontx_pci_bchvf_probe,
	.priv_auto = sizeof(struct bch_vf),
};

U_BOOT_PCI_DEVICE(octeontx_pci_bchpf, octeontx_bchpf_pci_id_table);
U_BOOT_PCI_DEVICE(octeontx_pci_bchvf, octeontx_bchvf_pci_id_table);

View File

@ -1,131 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2018 Marvell International Ltd.
*/
#ifndef __OCTEONTX_BCH_H__
#define __OCTEONTX_BCH_H__
#include "octeontx_bch_regs.h"
/* flags to indicate the features supported */
#define BCH_FLAG_SRIOV_ENABLED BIT(1)
/*
* BCH Registers map for 81xx
*/
/* PF registers */
#define BCH_CTL 0x0ull
#define BCH_ERR_CFG 0x10ull
#define BCH_BIST_RESULT 0x80ull
#define BCH_ERR_INT 0x88ull
#define BCH_ERR_INT_W1S 0x90ull
#define BCH_ERR_INT_ENA_W1C 0xA0ull
#define BCH_ERR_INT_ENA_W1S 0xA8ull
/* VF registers */
#define BCH_VQX_CTL(z) 0x0ull
#define BCH_VQX_CMD_BUF(z) 0x8ull
#define BCH_VQX_CMD_PTR(z) 0x20ull
#define BCH_VQX_DOORBELL(z) 0x800ull
#define BCHPF_DRIVER_NAME "octeontx-bchpf"
#define BCHVF_DRIVER_NAME "octeontx-bchvf"
/* Per-PF state for one BCH engine */
struct bch_device {
	struct list_head list;		/* entry in octeontx_bch_devices */
	u8 max_vfs;			/* VFs supported by the hardware */
	u8 vfs_enabled;			/* VFs currently enabled */
	u8 vfs_in_use;			/* VFs handed out to consumers */
	u32 flags;			/* BCH_FLAG_* bits */
	void __iomem *reg_base;		/* mapped BAR0 registers */
	struct udevice *dev;		/* owning PF udevice */
};

/* Per-VF state: one command queue on the BCH engine */
struct bch_vf {
	u16 flags;
	u8 vfid;			/* index of this VF */
	u8 node;			/* NUMA node */
	u8 priority;
	struct udevice *dev;		/* owning VF udevice */
	void __iomem *reg_base;		/* mapped BAR0 registers */
};

/* A single DMA-able buffer descriptor */
struct buf_ptr {
	u8 *vptr;			/* CPU virtual address */
	dma_addr_t dma_addr;		/* bus address */
	u16 size;			/* buffer size in bytes */
};
void *octeontx_bch_getv(void);
void octeontx_bch_putv(void *token);
void *octeontx_bch_getp(void);
void octeontx_bch_putp(void *token);
int octeontx_bch_wait(struct bch_vf *vf, union bch_resp *resp,
dma_addr_t handle);
/**
* Given a data block calculate the ecc data and fill in the response
*
* @param[in] block 8-byte aligned pointer to data block to calculate ECC
* @param block_size Size of block in bytes, must be a multiple of two.
* @param bch_level Number of errors that must be corrected. The number of
* parity bytes is equal to ((15 * bch_level) + 7) / 8.
* Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
* @param[out] ecc 8-byte aligned pointer to where ecc data should go
* @param[in] resp pointer to where responses will be written.
*
* Return: Zero on success, negative on failure.
*/
int octeontx_bch_encode(struct bch_vf *vf, dma_addr_t block, u16 block_size,
u8 bch_level, dma_addr_t ecc, dma_addr_t resp);
/**
* Given a data block and ecc data correct the data block
*
* @param[in] block_ecc_in 8-byte aligned pointer to data block with ECC
* data concatenated to the end to correct
* @param block_size Size of block in bytes, must be a multiple of
* two.
* @param bch_level Number of errors that must be corrected. The
* number of parity bytes is equal to
* ((15 * bch_level) + 7) / 8.
* Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
* @param[out] block_out 8-byte aligned pointer to corrected data buffer.
* This should not be the same as block_ecc_in.
* @param[in] resp pointer to where responses will be written.
*
* Return: Zero on success, negative on failure.
*/
int octeontx_bch_decode(struct bch_vf *vf, dma_addr_t block_ecc_in,
u16 block_size, u8 bch_level,
dma_addr_t block_out, dma_addr_t resp);
/**
* Ring the BCH doorbell telling it that new commands are
* available.
*
* @param num_commands Number of new commands
* @param vf virtual function handle
*/
static inline void octeontx_bch_write_doorbell(u64 num_commands,
					       struct bch_vf *vf)
{
	/* The doorbell register counts 64-bit words, not commands */
	u64 nwords = num_commands * (sizeof(union bch_cmd) / sizeof(uint64_t));

	writeq(nwords, vf->reg_base + BCH_VQX_DOORBELL(0));
}
/**
* Since it's possible (and even likely) that the NAND device will be probed
* before the BCH device has been probed, we may need to defer the probing.
*
* In this case, the initial probe returns success but the actual probing
* is deferred until the BCH VF has been probed.
*
* Return: 0 for success, otherwise error
*/
int octeontx_pci_nand_deferred_probe(void);
#endif /* __OCTEONTX_BCH_H__ */

View File

@ -1,167 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2018 Marvell International Ltd.
*/
#ifndef __OCTEONTX_BCH_REGS_H__
#define __OCTEONTX_BCH_REGS_H__
#define BCH_NR_VF 1
/* One 4-word BCH command as consumed by the hardware queue */
union bch_cmd {
	u64 u[4];
	struct fields {
		/* control word: operation, block size, ECC strength */
		struct {
			u64 size:12;		/* data block size in bytes */
			u64 reserved_12_31:20;
			u64 ecc_level:4;	/* correctable-error count */
			u64 reserved_36_61:26;
			u64 ecc_gen:2;		/* enum ecc_gen operation */
		} cword;
		/* output pointer word */
		struct {
			u64 ptr:49;		/* bus address of output */
			u64 reserved_49_55:7;
			u64 nc:1;
			u64 fw:1;
			u64 reserved_58_63:6;
		} oword;
		/* input pointer word */
		struct {
			u64 ptr:49;		/* bus address of input */
			u64 reserved_49_55:7;
			u64 nc:1;
			u64 reserved_57_63:7;
		} iword;
		/* response pointer word */
		struct {
			u64 ptr:49;		/* bus address of response */
			u64 reserved_49_63:15;
		} rword;
	} s;
};

/* Values for cword.ecc_gen: what the engine should do with the block */
enum ecc_gen {
	eg_correct,
	eg_copy,
	eg_gen,
	eg_copy3,
};

/** Response from BCH instruction */
union bch_resp {
	u16 u16;
	struct {
		u16 num_errors:7;	/** Number of errors in block */
		u16 zero:6;		/** Always zero, ignore */
		u16 erased:1;		/** Block is erased */
		u16 uncorrectable:1;	/** too many bits flipped */
		u16 done:1;		/** Block is done */
	} s;
};

/* Per-queue control register layout (BCH_VQX_CTL) */
union bch_vqx_ctl {
	u64 u;
	struct {
		u64 reserved_0:1;
		u64 cmd_be:1;		/* big-endian command words */
		u64 max_read:4;
		u64 reserved_6_15:10;
		u64 erase_disable:1;
		u64 one_cmd:1;
		u64 early_term:4;
		u64 reserved_22_63:42;
	} s;
};

/* Per-queue command-buffer register layout (BCH_VQX_CMD_BUF) */
union bch_vqx_cmd_buf {
	u64 u;
	struct {
		u64 reserved_0_32:33;
		u64 size:13;		/* chunk size in 64-bit words */
		u64 dfb:1;		/* don't fill buffer hint */
		u64 ldwb:1;		/* load-and-write-back hint */
		u64 reserved_48_63:16;
	} s;
};

/* keep queue state indexed, even though just one supported here,
 * for later generalization to similarly-shaped queues on other Cavium devices
 */
enum {
	QID_BCH,
	QID_MAX
};

/* Software state of one hardware command queue */
struct bch_q {
	struct udevice *dev;	/* device the queue belongs to */
	int index;		/* running write index (slots issued) */
	u16 max_depth;		/* usable slots per chunk (2^N - 1) */
	u16 pool_size_m1;
	u64 *base_vaddr;	/* CPU address of the chunk ring */
	dma_addr_t base_paddr;	/* bus address of the chunk ring */
};
extern struct bch_q octeontx_bch_q[QID_MAX];

/* with one dma-mapped area, virt<->phys conversions by +/- (vaddr-paddr) */
static inline dma_addr_t qphys(int qid, void *v)
{
	struct bch_q *q = &octeontx_bch_q[qid];
	int off = (u8 *)v - (u8 *)q->base_vaddr;

	return q->base_paddr + off;
}

#define octeontx_ptr_to_phys(v) qphys(QID_BCH, (v))

static inline void *qvirt(int qid, dma_addr_t p)
{
	struct bch_q *q = &octeontx_bch_q[qid];
	int off = p - q->base_paddr;

	return q->base_vaddr + off;
}

#define octeontx_phys_to_ptr(p) qvirt(QID_BCH, (p))
/* plenty for interleaved r/w on two planes with 16k page, ecc_size 1k */
/* QDEPTH >= 16, as successive chunks must align on 128-byte boundaries */
#define QDEPTH 256 /* u64s in a command queue chunk, incl next-pointer */
#define NQS 1 /* linked chunks in the chain */
/**
* Write an arbitrary number of command words to a command queue.
* This is a generic function; the fixed number of command word
* functions yield higher performance.
*
* Could merge with crypto version for FPA use on cn83xx
*/
static inline int octeontx_cmd_queue_write(int queue_id, bool use_locking,
					   int cmd_count, const u64 *cmds)
{
	int ret = 0;
	u64 *cmd_ptr;
	struct bch_q *qptr = &octeontx_bch_q[queue_id];

	/* NOTE(review): use_locking is accepted but currently unused */
	if (unlikely(cmd_count < 1 || cmd_count > 32))
		return -EINVAL;
	if (unlikely(!cmds))
		return -EINVAL;

	cmd_ptr = qptr->base_vaddr;

	while (cmd_count > 0) {
		int slot = qptr->index % (QDEPTH * NQS);

		/*
		 * The last slot of each chunk holds the next-chunk link
		 * word; skip it but keep advancing the queue index.
		 */
		if (slot % QDEPTH != QDEPTH - 1) {
			cmd_ptr[slot] = *cmds++;
			cmd_count--;
		}

		qptr->index++;
	}

	__iowmb();	/* flush commands before ringing bell */

	return ret;
}
#endif /* __OCTEONTX_BCH_REGS_H__ */

File diff suppressed because it is too large Load Diff

View File

@ -1765,6 +1765,7 @@ static int pxa3xx_nand_probe_dt(struct udevice *dev, struct pxa3xx_nand_info *in
pdata->num_cs = dev_read_u32_default(dev, "num-cs", 1); pdata->num_cs = dev_read_u32_default(dev, "num-cs", 1);
if (pdata->num_cs != 1) { if (pdata->num_cs != 1) {
pr_err("pxa3xx driver supports single CS only\n"); pr_err("pxa3xx driver supports single CS only\n");
kfree(pdata);
return -EINVAL; return -EINVAL;
} }

View File

@ -942,21 +942,21 @@ static int stm32_fmc2_nfc_probe(struct udevice *dev)
addr = dev_read_addr_index(dev, mem_region); addr = dev_read_addr_index(dev, mem_region);
if (addr == FDT_ADDR_T_NONE) { if (addr == FDT_ADDR_T_NONE) {
dev_err(dev, "Resource data_base not found for cs%d", chip_cs); dev_err(dev, "Resource data_base not found for cs%d", chip_cs);
return ret; return -EINVAL;
} }
nfc->data_base[chip_cs] = addr; nfc->data_base[chip_cs] = addr;
addr = dev_read_addr_index(dev, mem_region + 1); addr = dev_read_addr_index(dev, mem_region + 1);
if (addr == FDT_ADDR_T_NONE) { if (addr == FDT_ADDR_T_NONE) {
dev_err(dev, "Resource cmd_base not found for cs%d", chip_cs); dev_err(dev, "Resource cmd_base not found for cs%d", chip_cs);
return ret; return -EINVAL;
} }
nfc->cmd_base[chip_cs] = addr; nfc->cmd_base[chip_cs] = addr;
addr = dev_read_addr_index(dev, mem_region + 2); addr = dev_read_addr_index(dev, mem_region + 2);
if (addr == FDT_ADDR_T_NONE) { if (addr == FDT_ADDR_T_NONE) {
dev_err(dev, "Resource addr_base not found for cs%d", chip_cs); dev_err(dev, "Resource addr_base not found for cs%d", chip_cs);
return ret; return -EINVAL;
} }
nfc->addr_base[chip_cs] = addr; nfc->addr_base[chip_cs] = addr;
} }

View File

@ -1403,8 +1403,10 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
if (!data) if (!data)
return -ENOMEM; return -ENOMEM;
if (ecc->size != 512 && ecc->size != 1024) if (ecc->size != 512 && ecc->size != 1024) {
kfree(data);
return -EINVAL; return -EINVAL;
}
/* Prefer 1k ECC chunk over 512 ones */ /* Prefer 1k ECC chunk over 512 ones */
if (ecc->size == 512 && mtd->writesize > 512) { if (ecc->size == 512 && mtd->writesize > 512) {
@ -1641,17 +1643,20 @@ static int sunxi_nand_chip_init(struct udevice *dev, struct sunxi_nfc *nfc,
if (ret) { if (ret) {
dev_err(dev, "could not retrieve reg property: %d\n", dev_err(dev, "could not retrieve reg property: %d\n",
ret); ret);
kfree(chip);
return ret; return ret;
} }
if (tmp > NFC_MAX_CS) { if (tmp > NFC_MAX_CS) {
dev_err(dev, dev_err(dev,
"invalid reg value: %u (max CS = 7)\n", tmp); "invalid reg value: %u (max CS = 7)\n", tmp);
kfree(chip);
return -EINVAL; return -EINVAL;
} }
if (test_and_set_bit(tmp, &nfc->assigned_cs)) { if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
dev_err(dev, "CS %d already assigned\n", tmp); dev_err(dev, "CS %d already assigned\n", tmp);
kfree(chip);
return -EINVAL; return -EINVAL;
} }
@ -1678,12 +1683,14 @@ static int sunxi_nand_chip_init(struct udevice *dev, struct sunxi_nfc *nfc,
dev_err(dev, dev_err(dev,
"could not retrieve timings for ONFI mode 0: %d\n", "could not retrieve timings for ONFI mode 0: %d\n",
ret); ret);
kfree(chip);
return ret; return ret;
} }
ret = sunxi_nand_chip_set_timings(nfc, chip, timings); ret = sunxi_nand_chip_set_timings(nfc, chip, timings);
if (ret) { if (ret) {
dev_err(dev, "could not configure chip timings: %d\n", ret); dev_err(dev, "could not configure chip timings: %d\n", ret);
kfree(chip);
return ret; return ret;
} }
@ -1705,8 +1712,10 @@ static int sunxi_nand_chip_init(struct udevice *dev, struct sunxi_nfc *nfc,
mtd = nand_to_mtd(nand); mtd = nand_to_mtd(nand);
ret = nand_scan_ident(mtd, nsels, NULL); ret = nand_scan_ident(mtd, nsels, NULL);
if (ret) if (ret) {
kfree(chip);
return ret; return ret;
}
if (nand->bbt_options & NAND_BBT_USE_FLASH) if (nand->bbt_options & NAND_BBT_USE_FLASH)
nand->bbt_options |= NAND_BBT_NO_OOB; nand->bbt_options |= NAND_BBT_NO_OOB;
@ -1719,24 +1728,28 @@ static int sunxi_nand_chip_init(struct udevice *dev, struct sunxi_nfc *nfc,
ret = sunxi_nand_chip_init_timings(nfc, chip); ret = sunxi_nand_chip_init_timings(nfc, chip);
if (ret) { if (ret) {
dev_err(dev, "could not configure chip timings: %d\n", ret); dev_err(dev, "could not configure chip timings: %d\n", ret);
kfree(chip);
return ret; return ret;
} }
ret = sunxi_nand_ecc_init(mtd, &nand->ecc); ret = sunxi_nand_ecc_init(mtd, &nand->ecc);
if (ret) { if (ret) {
dev_err(dev, "ECC init failed: %d\n", ret); dev_err(dev, "ECC init failed: %d\n", ret);
kfree(chip);
return ret; return ret;
} }
ret = nand_scan_tail(mtd); ret = nand_scan_tail(mtd);
if (ret) { if (ret) {
dev_err(dev, "nand_scan_tail failed: %d\n", ret); dev_err(dev, "nand_scan_tail failed: %d\n", ret);
kfree(chip);
return ret; return ret;
} }
ret = nand_register(devnum, mtd); ret = nand_register(devnum, mtd);
if (ret) { if (ret) {
dev_err(dev, "failed to register mtd device: %d\n", ret); dev_err(dev, "failed to register mtd device: %d\n", ret);
kfree(chip);
return ret; return ret;
} }