Diffstat (limited to 'drivers/mtd/nand')
-rw-r--r--   drivers/mtd/nand/Kconfig                      5
-rw-r--r--   drivers/mtd/nand/Makefile                     2
-rw-r--r--   drivers/mtd/nand/atmel/nand-controller.c      7
-rw-r--r--   drivers/mtd/nand/atmel/pmecc.c               17
-rw-r--r--   drivers/mtd/nand/atmel/pmecc.h                1
-rw-r--r--   drivers/mtd/nand/au1550nd.c                   3
-rw-r--r--   drivers/mtd/nand/denali.c                   291
-rw-r--r--   drivers/mtd/nand/denali.h                    44
-rw-r--r--   drivers/mtd/nand/denali_dt.c                  4
-rw-r--r--   drivers/mtd/nand/denali_pci.c                 5
-rw-r--r--   drivers/mtd/nand/diskonchip.c                 3
-rw-r--r--   drivers/mtd/nand/gpio.c                     112
-rw-r--r--   drivers/mtd/nand/hisi504_nand.c               3
-rw-r--r--   drivers/mtd/nand/mtk_ecc.c                   13
-rw-r--r--   drivers/mtd/nand/mxc_nand.c                  19
-rw-r--r--   drivers/mtd/nand/nand_base.c                 34
-rw-r--r--   drivers/mtd/nand/nuc900_nand.c                2
-rw-r--r--   drivers/mtd/nand/omap2.c                    377
-rw-r--r--   drivers/mtd/nand/pxa3xx_nand.c               41
-rw-r--r--   drivers/mtd/nand/qcom_nandc.c               127
-rw-r--r--   drivers/mtd/nand/sh_flctl.c                   9
21 files changed, 660 insertions, 459 deletions
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 3f2036f31da4..bb48aafed9a2 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -317,8 +317,11 @@ config MTD_NAND_PXA3xx
tristate "NAND support on PXA3xx and Armada 370/XP"
depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU
help
+
This enables the driver for the NAND flash device found on
- PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).
+ PXA3xx processors (NFCv1) and also on 32-bit Armada
+ platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada
+ platforms (7K, 8K) (NFCv2).
config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC Controller"
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index ade5fc4c3819..57f4cdedf137 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -58,7 +58,7 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
-obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o
+obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o
nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_amd.o
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
index f25eca79f4e5..90a71a56bc23 100644
--- a/drivers/mtd/nand/atmel/nand-controller.c
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -718,8 +718,7 @@ static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
nc->op.addrs[nc->op.naddrs++] = page;
nc->op.addrs[nc->op.naddrs++] = page >> 8;
- if ((mtd->writesize > 512 && chip->chipsize > SZ_128M) ||
- (mtd->writesize <= 512 && chip->chipsize > SZ_32M))
+ if (chip->options & NAND_ROW_ADDR_3)
nc->op.addrs[nc->op.naddrs++] = page >> 16;
}
}
@@ -2530,6 +2529,9 @@ static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
struct atmel_nand_controller *nc = dev_get_drvdata(dev);
struct atmel_nand *nand;
+ if (nc->pmecc)
+ atmel_pmecc_reset(nc->pmecc);
+
list_for_each_entry(nand, &nc->chips, node) {
int i;
@@ -2547,6 +2549,7 @@ static struct platform_driver atmel_nand_controller_driver = {
.driver = {
.name = "atmel-nand-controller",
.of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
+ .pm = &atmel_nand_controller_pm_ops,
},
.probe = atmel_nand_controller_probe,
.remove = atmel_nand_controller_remove,
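
The .pm hook added above points at a dev_pm_ops structure whose definition falls outside the quoted hunks. A minimal sketch of a resume-only definition, assuming the callback name from the resume hunk above:

static const struct dev_pm_ops atmel_nand_controller_pm_ops = {
	/* no suspend callback needed here; resume reprograms the controller */
	SET_SYSTEM_SLEEP_PM_OPS(NULL, atmel_nand_controller_resume)
};
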
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
index 8268636675ef..fcbe4fd6e684 100644
--- a/drivers/mtd/nand/atmel/pmecc.c
+++ b/drivers/mtd/nand/atmel/pmecc.c
@@ -765,6 +765,13 @@ void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user,
}
EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes);
+void atmel_pmecc_reset(struct atmel_pmecc *pmecc)
+{
+ writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_reset);
+
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op)
{
struct atmel_pmecc *pmecc = user->pmecc;
@@ -797,10 +804,7 @@ EXPORT_SYMBOL_GPL(atmel_pmecc_enable);
void atmel_pmecc_disable(struct atmel_pmecc_user *user)
{
- struct atmel_pmecc *pmecc = user->pmecc;
-
- writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
- writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ atmel_pmecc_reset(user->pmecc);
mutex_unlock(&user->pmecc->lock);
}
EXPORT_SYMBOL_GPL(atmel_pmecc_disable);
@@ -855,10 +859,7 @@ static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev,
/* Disable all interrupts before registering the PMECC handler. */
writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR);
-
- /* Reset the ECC engine */
- writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
- writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ atmel_pmecc_reset(pmecc);
return pmecc;
}
diff --git a/drivers/mtd/nand/atmel/pmecc.h b/drivers/mtd/nand/atmel/pmecc.h
index a8ddbfca2ea5..817e0dd9fd15 100644
--- a/drivers/mtd/nand/atmel/pmecc.h
+++ b/drivers/mtd/nand/atmel/pmecc.h
@@ -61,6 +61,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
struct atmel_pmecc_user_req *req);
void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user);
+void atmel_pmecc_reset(struct atmel_pmecc *pmecc);
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op);
void atmel_pmecc_disable(struct atmel_pmecc_user *user);
int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user);
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 9d4a28fa6b73..8ab827edf94e 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -331,8 +331,7 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
ctx->write_byte(mtd, (u8)(page_addr >> 8));
- /* One more address cycle for devices > 32MiB */
- if (this->chipsize > (32 << 20))
+ if (this->options & NAND_ROW_ADDR_3)
ctx->write_byte(mtd,
((page_addr >> 16) & 0x0f));
}
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 3087b0ba7b7f..5124f8ae8c04 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -10,20 +10,18 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
-#include <linux/interrupt.h>
-#include <linux/delay.h>
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
#include <linux/dma-mapping.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/mtd/mtd.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include "denali.h"
@@ -31,9 +29,9 @@ MODULE_LICENSE("GPL");
#define DENALI_NAND_NAME "denali-nand"
-/* Host Data/Command Interface */
-#define DENALI_HOST_ADDR 0x00
-#define DENALI_HOST_DATA 0x10
+/* for Indexed Addressing */
+#define DENALI_INDEXED_CTRL 0x00
+#define DENALI_INDEXED_DATA 0x10
#define DENALI_MAP00 (0 << 26) /* direct access to buffer */
#define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */
@@ -61,31 +59,55 @@ MODULE_LICENSE("GPL");
*/
#define DENALI_CLK_X_MULT 6
-/*
- * this macro allows us to convert from an MTD structure to our own
- * device context (denali) structure.
- */
static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}
-static void denali_host_write(struct denali_nand_info *denali,
- uint32_t addr, uint32_t data)
+/*
+ * Direct Addressing - the slave address forms the control information (command
+ * type, bank, block, and page address). The slave data is the actual data to
+ * be transferred. This mode requires 28 bits of address region allocated.
+ */
+static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
+{
+ return ioread32(denali->host + addr);
+}
+
+static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
+ u32 data)
{
- iowrite32(addr, denali->host + DENALI_HOST_ADDR);
- iowrite32(data, denali->host + DENALI_HOST_DATA);
+ iowrite32(data, denali->host + addr);
+}
+
+/*
+ * Indexed Addressing - address translation module intervenes in passing the
+ * control information. This mode reduces the required address range. The
+ * control information and transferred data are latched by the registers in
+ * the translation module.
+ */
+static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
+{
+ iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
+ return ioread32(denali->host + DENALI_INDEXED_DATA);
+}
+
+static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
+ u32 data)
+{
+ iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
+ iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}
/*
* Use the configuration feature register to determine the maximum number of
* banks that the hardware supports.
*/
-static void detect_max_banks(struct denali_nand_info *denali)
+static void denali_detect_max_banks(struct denali_nand_info *denali)
{
uint32_t features = ioread32(denali->reg + FEATURES);
- denali->max_banks = 1 << (features & FEATURES__N_BANKS);
+ denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
/* the encoding changed from rev 5.0 to 5.1 */
if (denali->revision < 0x0501)
@@ -189,7 +211,7 @@ static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
msecs_to_jiffies(1000));
if (!time_left) {
dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
- denali->irq_mask);
+ irq_mask);
return 0;
}
@@ -208,73 +230,47 @@ static uint32_t denali_check_irq(struct denali_nand_info *denali)
return irq_status;
}
-/*
- * This helper function setups the registers for ECC and whether or not
- * the spare area will be transferred.
- */
-static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
- bool transfer_spare)
-{
- int ecc_en_flag, transfer_spare_flag;
-
- /* set ECC, transfer spare bits if needed */
- ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
- transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
-
- /* Enable spare area/ECC per user's request. */
- iowrite32(ecc_en_flag, denali->reg + ECC_ENABLE);
- iowrite32(transfer_spare_flag, denali->reg + TRANSFER_SPARE_REG);
-}
-
static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len; i++)
- buf[i] = ioread32(denali->host + DENALI_HOST_DATA);
+ buf[i] = denali->host_read(denali, addr);
}
static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len; i++)
- iowrite32(buf[i], denali->host + DENALI_HOST_DATA);
+ denali->host_write(denali, addr, buf[i]);
}
static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
uint16_t *buf16 = (uint16_t *)buf;
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len / 2; i++)
- buf16[i] = ioread32(denali->host + DENALI_HOST_DATA);
+ buf16[i] = denali->host_read(denali, addr);
}
static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
const uint16_t *buf16 = (const uint16_t *)buf;
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len / 2; i++)
- iowrite32(buf16[i], denali->host + DENALI_HOST_DATA);
+ denali->host_write(denali, addr, buf16[i]);
}
static uint8_t denali_read_byte(struct mtd_info *mtd)
@@ -319,7 +315,7 @@ static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
if (ctrl & NAND_CTRL_CHANGE)
denali_reset_irq(denali);
- denali_host_write(denali, DENALI_BANK(denali) | type, dat);
+ denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}
static int denali_dev_ready(struct mtd_info *mtd)
@@ -389,7 +385,7 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
return 0;
}
- max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS;
+ max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);
/*
* The register holds the maximum of per-sector corrected bitflips.
@@ -402,13 +398,6 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
return max_bitflips;
}
-#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
-#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
-#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
-#define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE)
-#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
-#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
-
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
struct denali_nand_info *denali,
unsigned long *uncor_ecc_flags, uint8_t *buf)
@@ -426,18 +415,20 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
do {
err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
- err_sector = ECC_SECTOR(err_addr);
- err_byte = ECC_BYTE(err_addr);
+ err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
+ err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);
err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
- err_cor_value = ECC_CORRECTION_VALUE(err_cor_info);
- err_device = ECC_ERR_DEVICE(err_cor_info);
+ err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
+ err_cor_info);
+ err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
+ err_cor_info);
/* reset the bitflip counter when crossing ECC sector */
if (err_sector != prev_sector)
bitflips = 0;
- if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) {
+ if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
/*
* Check later if this is a real ECC error, or
* an erased sector.
@@ -467,12 +458,11 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
}
prev_sector = err_sector;
- } while (!ECC_LAST_ERR(err_cor_info));
+ } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));
/*
- * Once handle all ecc errors, controller will trigger a
- * ECC_TRANSACTION_DONE interrupt, so here just wait for
- * a while for this interrupt
+	 * Once all ECC errors are handled, the controller will trigger an
+	 * ECC_TRANSACTION_DONE interrupt.
*/
irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
@@ -481,13 +471,6 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
return max_bitflips;
}
-/* programs the controller to either enable/disable DMA transfers */
-static void denali_enable_dma(struct denali_nand_info *denali, bool en)
-{
- iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->reg + DMA_ENABLE);
- ioread32(denali->reg + DMA_ENABLE);
-}
-
static void denali_setup_dma64(struct denali_nand_info *denali,
dma_addr_t dma_addr, int page, int write)
{
@@ -502,14 +485,14 @@ static void denali_setup_dma64(struct denali_nand_info *denali,
* 1. setup transfer type, interrupt when complete,
* burst len = 64 bytes, the number of pages
*/
- denali_host_write(denali, mode,
- 0x01002000 | (64 << 16) | (write << 8) | page_count);
+ denali->host_write(denali, mode,
+ 0x01002000 | (64 << 16) | (write << 8) | page_count);
/* 2. set memory low address */
- denali_host_write(denali, mode, dma_addr);
+ denali->host_write(denali, mode, lower_32_bits(dma_addr));
/* 3. set memory high address */
- denali_host_write(denali, mode, (uint64_t)dma_addr >> 32);
+ denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
static void denali_setup_dma32(struct denali_nand_info *denali,
@@ -523,32 +506,23 @@ static void denali_setup_dma32(struct denali_nand_info *denali,
/* DMA is a four step process */
/* 1. setup transfer type and # of pages */
- denali_host_write(denali, mode | page,
- 0x2000 | (write << 8) | page_count);
+ denali->host_write(denali, mode | page,
+ 0x2000 | (write << 8) | page_count);
/* 2. set memory high address bits 23:8 */
- denali_host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
+ denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
/* 3. set memory low address bits 23:8 */
- denali_host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
+ denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
/* 4. interrupt when complete, burst len = 64 bytes */
- denali_host_write(denali, mode | 0x14000, 0x2400);
-}
-
-static void denali_setup_dma(struct denali_nand_info *denali,
- dma_addr_t dma_addr, int page, int write)
-{
- if (denali->caps & DENALI_CAP_DMA_64BIT)
- denali_setup_dma64(denali, dma_addr, page, write);
- else
- denali_setup_dma32(denali, dma_addr, page, write);
+ denali->host_write(denali, mode | 0x14000, 0x2400);
}
static int denali_pio_read(struct denali_nand_info *denali, void *buf,
size_t size, int page, int raw)
{
- uint32_t addr = DENALI_BANK(denali) | page;
+ u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
uint32_t *buf32 = (uint32_t *)buf;
uint32_t irq_status, ecc_err_mask;
int i;
@@ -560,9 +534,8 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
denali_reset_irq(denali);
- iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
for (i = 0; i < size / 4; i++)
- *buf32++ = ioread32(denali->host + DENALI_HOST_DATA);
+ *buf32++ = denali->host_read(denali, addr);
irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
if (!(irq_status & INTR__PAGE_XFER_INC))
@@ -577,16 +550,15 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
static int denali_pio_write(struct denali_nand_info *denali,
const void *buf, size_t size, int page, int raw)
{
- uint32_t addr = DENALI_BANK(denali) | page;
+ u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
const uint32_t *buf32 = (uint32_t *)buf;
uint32_t irq_status;
int i;
denali_reset_irq(denali);
- iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
for (i = 0; i < size / 4; i++)
- iowrite32(*buf32++, denali->host + DENALI_HOST_DATA);
+ denali->host_write(denali, addr, *buf32++);
irq_status = denali_wait_for_irq(denali,
INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
@@ -635,19 +607,19 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
ecc_err_mask = INTR__ECC_ERR;
}
- denali_enable_dma(denali, true);
+ iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
denali_reset_irq(denali);
- denali_setup_dma(denali, dma_addr, page, write);
+ denali->setup_dma(denali, dma_addr, page, write);
- /* wait for operation to complete */
irq_status = denali_wait_for_irq(denali, irq_mask);
if (!(irq_status & INTR__DMA_CMD_COMP))
ret = -EIO;
else if (irq_status & ecc_err_mask)
ret = -EBADMSG;
- denali_enable_dma(denali, false);
+ iowrite32(0, denali->reg + DMA_ENABLE);
+
dma_unmap_single(denali->dev, dma_addr, size, dir);
if (irq_status & INTR__ERASED_PAGE)
@@ -659,7 +631,9 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
size_t size, int page, int raw, int write)
{
- setup_ecc_for_xfer(denali, !raw, raw);
+ iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
+ iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
+ denali->reg + TRANSFER_SPARE_REG);
if (denali->dma_avail)
return denali_dma_xfer(denali, buf, size, page, raw, write);
@@ -970,8 +944,8 @@ static int denali_erase(struct mtd_info *mtd, int page)
denali_reset_irq(denali);
- denali_host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
- DENALI_ERASE);
+ denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
+ DENALI_ERASE);
/* wait for erase to complete or failure to occur */
irq_status = denali_wait_for_irq(denali,
@@ -1009,7 +983,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + ACC_CLKS);
tmp &= ~ACC_CLKS__VALUE;
- tmp |= acc_clks;
+ tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
iowrite32(tmp, denali->reg + ACC_CLKS);
/* tRWH -> RE_2_WE */
@@ -1018,7 +992,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RE_2_WE);
tmp &= ~RE_2_WE__VALUE;
- tmp |= re_2_we;
+ tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
iowrite32(tmp, denali->reg + RE_2_WE);
/* tRHZ -> RE_2_RE */
@@ -1027,16 +1001,22 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RE_2_RE);
tmp &= ~RE_2_RE__VALUE;
- tmp |= re_2_re;
+ tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
iowrite32(tmp, denali->reg + RE_2_RE);
- /* tWHR -> WE_2_RE */
- we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk);
+ /*
+ * tCCS, tWHR -> WE_2_RE
+ *
+ * With WE_2_RE properly set, the Denali controller automatically takes
+ * care of the delay; the driver need not set NAND_WAIT_TCCS.
+ */
+ we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min),
+ t_clk);
we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
- tmp |= we_2_re;
+ tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);
/* tADL -> ADDR_2_DATA */
@@ -1050,8 +1030,8 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
- tmp &= ~addr_2_data_mask;
- tmp |= addr_2_data;
+ tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
+ tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);
/* tREH, tWH -> RDWR_EN_HI_CNT */
@@ -1061,7 +1041,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
tmp &= ~RDWR_EN_HI_CNT__VALUE;
- tmp |= rdwr_en_hi;
+ tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);
/* tRP, tWP -> RDWR_EN_LO_CNT */
@@ -1075,7 +1055,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
tmp &= ~RDWR_EN_LO_CNT__VALUE;
- tmp |= rdwr_en_lo;
+ tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);
/* tCS, tCEA -> CS_SETUP_CNT */
@@ -1086,7 +1066,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + CS_SETUP_CNT);
tmp &= ~CS_SETUP_CNT__VALUE;
- tmp |= cs_setup;
+ tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
iowrite32(tmp, denali->reg + CS_SETUP_CNT);
return 0;
@@ -1131,15 +1111,11 @@ static void denali_hw_init(struct denali_nand_info *denali)
* if this value is 0, just let it be.
*/
denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
- detect_max_banks(denali);
+ denali_detect_max_banks(denali);
iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
-
- /* Should set value for these registers when init */
- iowrite32(0, denali->reg + TWO_ROW_ADDR_CYCLES);
- iowrite32(1, denali->reg + ECC_ENABLE);
}
int denali_calc_ecc_bytes(int step_size, int strength)
@@ -1211,22 +1187,6 @@ static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
.free = denali_ooblayout_free,
};
-/* initialize driver data structures */
-static void denali_drv_init(struct denali_nand_info *denali)
-{
- /*
- * the completion object will be used to notify
- * the callee that the interrupt is done
- */
- init_completion(&denali->complete);
-
- /*
- * the spinlock will be used to synchronize the ISR with any
- * element that might be access shared data (interrupt status)
- */
- spin_lock_init(&denali->irq_lock);
-}
-
static int denali_multidev_fixup(struct denali_nand_info *denali)
{
struct nand_chip *chip = &denali->nand;
@@ -1282,15 +1242,17 @@ int denali_init(struct denali_nand_info *denali)
{
struct nand_chip *chip = &denali->nand;
struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 features = ioread32(denali->reg + FEATURES);
int ret;
mtd->dev.parent = denali->dev;
denali_hw_init(denali);
- denali_drv_init(denali);
+
+ init_completion(&denali->complete);
+ spin_lock_init(&denali->irq_lock);
denali_clear_irq_all(denali);
- /* Request IRQ after all the hardware initialization is finished */
ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
IRQF_SHARED, DENALI_NAND_NAME, denali);
if (ret) {
@@ -1308,7 +1270,6 @@ int denali_init(struct denali_nand_info *denali)
if (!mtd->name)
mtd->name = "denali-nand";
- /* register the driver with the NAND core subsystem */
chip->select_chip = denali_select_chip;
chip->read_byte = denali_read_byte;
chip->write_byte = denali_write_byte;
@@ -1317,15 +1278,18 @@ int denali_init(struct denali_nand_info *denali)
chip->dev_ready = denali_dev_ready;
chip->waitfunc = denali_waitfunc;
+ if (features & FEATURES__INDEX_ADDR) {
+ denali->host_read = denali_indexed_read;
+ denali->host_write = denali_indexed_write;
+ } else {
+ denali->host_read = denali_direct_read;
+ denali->host_write = denali_direct_write;
+ }
+
/* clk rate info is needed for setup_data_interface */
if (denali->clk_x_rate)
chip->setup_data_interface = denali_setup_data_interface;
- /*
- * scan for NAND devices attached to the controller
- * this is the first stage in a two step process to register
- * with the nand subsystem
- */
ret = nand_scan_ident(mtd, denali->max_banks, NULL);
if (ret)
goto disable_irq;
@@ -1347,20 +1311,15 @@ int denali_init(struct denali_nand_info *denali)
if (denali->dma_avail) {
chip->options |= NAND_USE_BOUNCE_BUFFER;
chip->buf_align = 16;
+ if (denali->caps & DENALI_CAP_DMA_64BIT)
+ denali->setup_dma = denali_setup_dma64;
+ else
+ denali->setup_dma = denali_setup_dma32;
}
- /*
- * second stage of the NAND scan
- * this stage requires information regarding ECC and
- * bad block management.
- */
-
chip->bbt_options |= NAND_BBT_USE_FLASH;
chip->bbt_options |= NAND_BBT_NO_OOB;
-
chip->ecc.mode = NAND_ECC_HW_SYNDROME;
-
- /* no subpage writes on denali */
chip->options |= NAND_NO_SUBPAGE_WRITE;
ret = denali_ecc_setup(mtd, chip, denali);
@@ -1373,12 +1332,15 @@ int denali_init(struct denali_nand_info *denali)
"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
- iowrite32(MAKE_ECC_CORRECTION(chip->ecc.strength, 1),
+ iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
+ FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
denali->reg + ECC_CORRECTION);
iowrite32(mtd->erasesize / mtd->writesize,
denali->reg + PAGES_PER_BLOCK);
iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
denali->reg + DEVICE_WIDTH);
+ iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
+ denali->reg + TWO_ROW_ADDR_CYCLES);
iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
@@ -1441,7 +1403,6 @@ disable_irq:
}
EXPORT_SYMBOL(denali_init);
-/* driver exit point */
void denali_remove(struct denali_nand_info *denali)
{
struct mtd_info *mtd = nand_to_mtd(&denali->nand);
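
denali_setup_data_interface() above converts each nand_sdr_timings value into controller clock cycles with DIV_ROUND_UP() before programming it through FIELD_PREP(). A minimal sketch of that conversion; the helper name is made up (the driver open-codes each conversion) and the numbers are illustrative rather than from a real chip:

#include <linux/kernel.h>

/*
 * ceil(timing / clock period): e.g. a 120 ns tWHR with a 10 ns controller
 * clock needs DIV_ROUND_UP(120, 10) = 12 wait cycles. Both arguments must be
 * in the same unit; denali.c works in picoseconds.
 */
static unsigned int denali_timing_to_cycles(unsigned long timing_ps,
					    unsigned long t_clk_ps)
{
	return DIV_ROUND_UP(timing_ps, t_clk_ps);
}
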
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index 9239e6793e6e..2911066dacac 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -10,18 +10,16 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#ifndef __DENALI_H__
#define __DENALI_H__
#include <linux/bitops.h>
+#include <linux/completion.h>
#include <linux/mtd/rawnand.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
#define DEVICE_RESET 0x0
#define DEVICE_RESET__BANK(bank) BIT(bank)
@@ -111,9 +109,6 @@
#define ECC_CORRECTION 0x1b0
#define ECC_CORRECTION__VALUE GENMASK(4, 0)
#define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16)
-#define MAKE_ECC_CORRECTION(val, thresh) \
- (((val) & (ECC_CORRECTION__VALUE)) | \
- (((thresh) << 16) & (ECC_CORRECTION__ERASE_THRESHOLD)))
#define READ_MODE 0x1c0
#define READ_MODE__VALUE GENMASK(3, 0)
@@ -255,13 +250,13 @@
#define ECC_ERROR_ADDRESS 0x630
#define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0)
-#define ECC_ERROR_ADDRESS__SECTOR_NR GENMASK(15, 12)
+#define ECC_ERROR_ADDRESS__SECTOR GENMASK(15, 12)
#define ERR_CORRECTION_INFO 0x640
-#define ERR_CORRECTION_INFO__BYTEMASK GENMASK(7, 0)
-#define ERR_CORRECTION_INFO__DEVICE_NR GENMASK(11, 8)
-#define ERR_CORRECTION_INFO__ERROR_TYPE BIT(14)
-#define ERR_CORRECTION_INFO__LAST_ERR_INFO BIT(15)
+#define ERR_CORRECTION_INFO__BYTE GENMASK(7, 0)
+#define ERR_CORRECTION_INFO__DEVICE GENMASK(11, 8)
+#define ERR_CORRECTION_INFO__UNCOR BIT(14)
+#define ERR_CORRECTION_INFO__LAST_ERR BIT(15)
#define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10)
#define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8)
@@ -310,23 +305,24 @@ struct denali_nand_info {
struct device *dev;
void __iomem *reg; /* Register Interface */
void __iomem *host; /* Host Data/Command Interface */
-
- /* elements used by ISR */
struct completion complete;
- spinlock_t irq_lock;
- uint32_t irq_mask;
- uint32_t irq_status;
+ spinlock_t irq_lock; /* protect irq_mask and irq_status */
+ u32 irq_mask; /* interrupts we are waiting for */
+ u32 irq_status; /* interrupts that have happened */
int irq;
-
- void *buf;
+ void *buf; /* for syndrome layout conversion */
dma_addr_t dma_addr;
- int dma_avail;
+ int dma_avail; /* can support DMA? */
int devs_per_cs; /* devices connected in parallel */
- int oob_skip_bytes;
+ int oob_skip_bytes; /* number of bytes reserved for BBM */
int max_banks;
- unsigned int revision;
- unsigned int caps;
+ unsigned int revision; /* IP revision */
+ unsigned int caps; /* IP capability (or quirk) */
const struct nand_ecc_caps *ecc_caps;
+ u32 (*host_read)(struct denali_nand_info *denali, u32 addr);
+ void (*host_write)(struct denali_nand_info *denali, u32 addr, u32 data);
+ void (*setup_dma)(struct denali_nand_info *denali, dma_addr_t dma_addr,
+ int page, int write);
};
#define DENALI_CAP_HW_ECC_FIXUP BIT(0)
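
The denali.c hunks above replace hand-rolled shift-and-mask macros with FIELD_GET()/FIELD_PREP() applied to the GENMASK() definitions in this header. A minimal sketch of the pattern, with made-up register values and example masks mirroring the ones defined above:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_SECTOR	GENMASK(15, 12)		/* like ECC_ERROR_ADDRESS__SECTOR */
#define EXAMPLE_VALUE	GENMASK(4, 0)		/* like ECC_CORRECTION__VALUE */

static void denali_field_example(void)
{
	u32 err_addr = 0x3abc;	/* made-up readout */

	/* FIELD_GET() masks and shifts down: (0x3abc & 0xf000) >> 12 = 0x3 */
	u32 sector = FIELD_GET(EXAMPLE_SECTOR, err_addr);

	/* FIELD_PREP() shifts a value into the field: 8 lands in bits 4..0 = 0x8 */
	u32 reg = FIELD_PREP(EXAMPLE_VALUE, 8);

	(void)sector;
	(void)reg;
}
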
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
index 56e2e177644d..cfd33e6ca77f 100644
--- a/drivers/mtd/nand/denali_dt.c
+++ b/drivers/mtd/nand/denali_dt.c
@@ -12,15 +12,16 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
+
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include "denali.h"
@@ -155,7 +156,6 @@ static struct platform_driver denali_dt_driver = {
.of_match_table = denali_nand_dt_ids,
},
};
-
module_platform_driver(denali_dt_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index 81370c79aa48..57fb7ae31412 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -11,6 +11,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
+
+#include <linux/errno.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -106,7 +109,6 @@ failed_remap_reg:
return ret;
}
-/* driver exit point */
static void denali_pci_remove(struct pci_dev *dev)
{
struct denali_nand_info *denali = pci_get_drvdata(dev);
@@ -122,5 +124,4 @@ static struct pci_driver denali_pci_driver = {
.probe = denali_pci_probe,
.remove = denali_pci_remove,
};
-
module_pci_driver(denali_pci_driver);
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index c3aa53caab5c..72671dc52e2e 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -705,8 +705,7 @@ static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int colu
if (page_addr != -1) {
WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
- /* One more address cycle for higher density devices */
- if (this->chipsize & 0x0c000000) {
+ if (this->options & NAND_ROW_ADDR_3) {
WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
printk("high density\n");
}
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index fd3648952b5a..484f7fbc3f7d 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -23,7 +23,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
@@ -31,12 +31,16 @@
#include <linux/mtd/nand-gpio.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_gpio.h>
struct gpiomtd {
void __iomem *io_sync;
struct nand_chip nand_chip;
struct gpio_nand_platdata plat;
+ struct gpio_desc *nce; /* Optional chip enable */
+ struct gpio_desc *cle;
+ struct gpio_desc *ale;
+ struct gpio_desc *rdy;
+ struct gpio_desc *nwp; /* Optional write protection */
};
static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd)
@@ -78,11 +82,10 @@ static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
gpio_nand_dosync(gpiomtd);
if (ctrl & NAND_CTRL_CHANGE) {
- if (gpio_is_valid(gpiomtd->plat.gpio_nce))
- gpio_set_value(gpiomtd->plat.gpio_nce,
- !(ctrl & NAND_NCE));
- gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE));
- gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE));
+ if (gpiomtd->nce)
+ gpiod_set_value(gpiomtd->nce, !(ctrl & NAND_NCE));
+ gpiod_set_value(gpiomtd->cle, !!(ctrl & NAND_CLE));
+ gpiod_set_value(gpiomtd->ale, !!(ctrl & NAND_ALE));
gpio_nand_dosync(gpiomtd);
}
if (cmd == NAND_CMD_NONE)
@@ -96,7 +99,7 @@ static int gpio_nand_devready(struct mtd_info *mtd)
{
struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
- return gpio_get_value(gpiomtd->plat.gpio_rdy);
+ return gpiod_get_value(gpiomtd->rdy);
}
#ifdef CONFIG_OF
@@ -123,12 +126,6 @@ static int gpio_nand_get_config_of(const struct device *dev,
}
}
- plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
- plat->gpio_nce = of_get_gpio(dev->of_node, 1);
- plat->gpio_ale = of_get_gpio(dev->of_node, 2);
- plat->gpio_cle = of_get_gpio(dev->of_node, 3);
- plat->gpio_nwp = of_get_gpio(dev->of_node, 4);
-
if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
plat->chip_delay = val;
@@ -201,10 +198,11 @@ static int gpio_nand_remove(struct platform_device *pdev)
nand_release(nand_to_mtd(&gpiomtd->nand_chip));
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
- if (gpio_is_valid(gpiomtd->plat.gpio_nce))
- gpio_set_value(gpiomtd->plat.gpio_nce, 1);
+ /* Enable write protection and disable the chip */
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_set_value(gpiomtd->nwp, 0);
+ if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
+ gpiod_set_value(gpiomtd->nce, 0);
return 0;
}
@@ -215,66 +213,66 @@ static int gpio_nand_probe(struct platform_device *pdev)
struct nand_chip *chip;
struct mtd_info *mtd;
struct resource *res;
+ struct device *dev = &pdev->dev;
int ret = 0;
- if (!pdev->dev.of_node && !dev_get_platdata(&pdev->dev))
+ if (!dev->of_node && !dev_get_platdata(dev))
return -EINVAL;
- gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL);
+ gpiomtd = devm_kzalloc(dev, sizeof(*gpiomtd), GFP_KERNEL);
if (!gpiomtd)
return -ENOMEM;
chip = &gpiomtd->nand_chip;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+ chip->IO_ADDR_R = devm_ioremap_resource(dev, res);
if (IS_ERR(chip->IO_ADDR_R))
return PTR_ERR(chip->IO_ADDR_R);
res = gpio_nand_get_io_sync(pdev);
if (res) {
- gpiomtd->io_sync = devm_ioremap_resource(&pdev->dev, res);
+ gpiomtd->io_sync = devm_ioremap_resource(dev, res);
if (IS_ERR(gpiomtd->io_sync))
return PTR_ERR(gpiomtd->io_sync);
}
- ret = gpio_nand_get_config(&pdev->dev, &gpiomtd->plat);
+ ret = gpio_nand_get_config(dev, &gpiomtd->plat);
if (ret)
return ret;
- if (gpio_is_valid(gpiomtd->plat.gpio_nce)) {
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce,
- "NAND NCE");
- if (ret)
- return ret;
- gpio_direction_output(gpiomtd->plat.gpio_nce, 1);
+ /* Just enable the chip */
+ gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiomtd->nce))
+ return PTR_ERR(gpiomtd->nce);
+
+ /* We disable write protection once we know probe() will succeed */
+ gpiomtd->nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->nwp)) {
+ ret = PTR_ERR(gpiomtd->nwp);
+ goto out_ce;
}
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp,
- "NAND NWP");
- if (ret)
- return ret;
+	gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
+	if (IS_ERR(gpiomtd->ale)) {
+		ret = PTR_ERR(gpiomtd->ale);
+ goto out_ce;
}
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE");
- if (ret)
- return ret;
- gpio_direction_output(gpiomtd->plat.gpio_ale, 0);
+ gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->cle)) {
+ ret = PTR_ERR(gpiomtd->cle);
+ goto out_ce;
+ }
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE");
- if (ret)
- return ret;
- gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
-
- if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy,
- "NAND RDY");
- if (ret)
- return ret;
- gpio_direction_input(gpiomtd->plat.gpio_rdy);
- chip->dev_ready = gpio_nand_devready;
+ gpiomtd->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN);
+ if (IS_ERR(gpiomtd->rdy)) {
+ ret = PTR_ERR(gpiomtd->rdy);
+ goto out_ce;
}
+ /* Using RDY pin */
+ if (gpiomtd->rdy)
+ chip->dev_ready = gpio_nand_devready;
nand_set_flash_node(chip, pdev->dev.of_node);
chip->IO_ADDR_W = chip->IO_ADDR_R;
@@ -285,12 +283,13 @@ static int gpio_nand_probe(struct platform_device *pdev)
chip->cmd_ctrl = gpio_nand_cmd_ctrl;
mtd = nand_to_mtd(chip);
- mtd->dev.parent = &pdev->dev;
+ mtd->dev.parent = dev;
platform_set_drvdata(pdev, gpiomtd);
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
+ /* Disable write protection, if wired up */
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_direction_output(gpiomtd->nwp, 1);
ret = nand_scan(mtd, 1);
if (ret)
@@ -305,8 +304,11 @@ static int gpio_nand_probe(struct platform_device *pdev)
return 0;
err_wp:
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_set_value(gpiomtd->nwp, 0);
+out_ce:
+ if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
+ gpiod_set_value(gpiomtd->nce, 0);
return ret;
}
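
The gpio.c conversion above swaps legacy integer GPIOs for GPIO descriptors: lines are requested by function name, optional lines come back as NULL rather than an invalid number, and gpiod_set_value() takes a logical value (the GPIO core applies any active-low flag from the firmware description). A minimal consumer sketch with a hypothetical "foo" line name:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int foo_request_gpio(struct device *dev)
{
	struct gpio_desc *foo;

	/* optional request: returns NULL, not an error, when the line is absent */
	foo = devm_gpiod_get_optional(dev, "foo", GPIOD_OUT_LOW);
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	if (foo)
		gpiod_set_value(foo, 1);	/* logical assert; polarity handled by the core */

	return 0;
}
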
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index d9ee1a7e6956..0897261c3e17 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -432,8 +432,7 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr)
host->addr_value[0] |= (page_addr & 0xffff)
<< (host->addr_cycle * 8);
host->addr_cycle += 2;
- /* One more address cycle for devices > 128MiB */
- if (chip->chipsize > (128 << 20)) {
+ if (chip->options & NAND_ROW_ADDR_3) {
host->addr_cycle += 1;
if (host->command == NAND_CMD_ERASE1)
host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index 7f3b065b6b8f..c51d214d169e 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -115,6 +115,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
op = ECC_DECODE;
dec = readw(ecc->regs + ECC_DECDONE);
if (dec & ecc->sectors) {
+ /*
+ * Clear decode IRQ status once again to ensure that
+ * there will be no extra IRQ.
+ */
+ readw(ecc->regs + ECC_DECIRQ_STA);
ecc->sectors = 0;
complete(&ecc->done);
} else {
@@ -130,8 +135,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
}
}
- writel(0, ecc->regs + ECC_IRQ_REG(op));
-
return IRQ_HANDLED;
}
@@ -307,6 +310,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
/* disable it */
mtk_ecc_wait_idle(ecc, op);
+ if (op == ECC_DECODE)
+ /*
+ * Clear decode IRQ status in case there is a timeout to wait
+ * decode IRQ.
+ */
+ readw(ecc->regs + ECC_DECIRQ_STA);
writew(0, ecc->regs + ECC_IRQ_REG(op));
writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 53e5e0337c3e..f3be0b2a8869 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -415,7 +415,7 @@ static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
* waits for completion. */
static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
{
- pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq);
+ dev_dbg(host->dev, "send_cmd(host, 0x%x, %d)\n", cmd, useirq);
writew(cmd, NFC_V1_V2_FLASH_CMD);
writew(NFC_CMD, NFC_V1_V2_CONFIG2);
@@ -431,7 +431,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
udelay(1);
}
if (max_retries < 0)
- pr_debug("%s: RESET failed\n", __func__);
+ dev_dbg(host->dev, "%s: RESET failed\n", __func__);
} else {
/* Wait for operation to complete */
wait_op_done(host, useirq);
@@ -454,7 +454,7 @@ static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
* a NAND command. */
static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
{
- pr_debug("send_addr(host, 0x%x %d)\n", addr, islast);
+ dev_dbg(host->dev, "send_addr(host, 0x%x %d)\n", addr, islast);
writew(addr, NFC_V1_V2_FLASH_ADDR);
writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
@@ -607,7 +607,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
uint16_t ecc_status = get_ecc_status_v1(host);
if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
- pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
+ dev_dbg(host->dev, "HWECC uncorrectable 2-bit ECC error\n");
return -EBADMSG;
}
@@ -634,7 +634,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
do {
err = ecc_stat & ecc_bit_mask;
if (err > err_limit) {
- printk(KERN_WARNING "UnCorrectable RS-ECC Error\n");
+ dev_dbg(host->dev, "UnCorrectable RS-ECC Error\n");
return -EBADMSG;
} else {
ret += err;
@@ -642,7 +642,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
ecc_stat >>= 4;
} while (--no_subpages);
- pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
+ dev_dbg(host->dev, "%d Symbol Correctable RS-ECC Error\n", ret);
return ret;
}
@@ -673,7 +673,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
host->buf_start++;
}
- pr_debug("%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
+ dev_dbg(host->dev, "%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
return ret;
}
@@ -859,8 +859,7 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
host->devtype_data->send_addr(host,
(page_addr >> 8) & 0xff, true);
} else {
- /* One more address cycle for higher density devices */
- if (mtd->size >= 0x4000000) {
+ if (nand_chip->options & NAND_ROW_ADDR_3) {
/* paddr_8 - paddr_15 */
host->devtype_data->send_addr(host,
(page_addr >> 8) & 0xff,
@@ -1212,7 +1211,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
- pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
+ dev_dbg(host->dev, "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
command, column, page_addr);
/* Reset command state information */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 12edaae17d81..6135d007a068 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -115,7 +115,7 @@ static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- if (section)
+ if (section || !ecc->total)
return -ERANGE;
oobregion->length = ecc->total;
@@ -727,8 +727,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, page_addr, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
- /* One more address cycle for devices > 32MiB */
- if (chip->chipsize > (32 << 20))
+ if (chip->options & NAND_ROW_ADDR_3)
chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
}
chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
@@ -854,8 +853,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, page_addr, ctrl);
chip->cmd_ctrl(mtd, page_addr >> 8,
NAND_NCE | NAND_ALE);
- /* One more address cycle for devices > 128MiB */
- if (chip->chipsize > (128 << 20))
+ if (chip->options & NAND_ROW_ADDR_3)
chip->cmd_ctrl(mtd, page_addr >> 16,
NAND_NCE | NAND_ALE);
}
@@ -1246,6 +1244,7 @@ int nand_reset(struct nand_chip *chip, int chipnr)
return 0;
}
+EXPORT_SYMBOL_GPL(nand_reset);
/**
* nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
@@ -2799,15 +2798,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ int chipnr = (int)(to >> chip->chip_shift);
struct mtd_oob_ops ops;
int ret;
- /* Wait for the device to get ready */
- panic_nand_wait(mtd, chip, 400);
-
/* Grab the device */
panic_nand_get_device(chip, mtd, FL_WRITING);
+ chip->select_chip(mtd, chipnr);
+
+ /* Wait for the device to get ready */
+ panic_nand_wait(mtd, chip, 400);
+
memset(&ops, 0, sizeof(ops));
ops.len = len;
ops.datbuf = (uint8_t *)buf;
@@ -3999,6 +4001,9 @@ ident_done:
chip->chip_shift += 32 - 1;
}
+ if (chip->chip_shift - chip->page_shift > 16)
+ chip->options |= NAND_ROW_ADDR_3;
+
chip->badblockbits = 8;
chip->erase = single_erase;
@@ -4700,6 +4705,19 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
break;
default:
+ /*
+ * Expose the whole OOB area to users if ECC_NONE
+ * is passed. We could do that for all kind of
+ * ->oobsize, but we must keep the old large/small
+ * page with ECC layout when ->oobsize <= 128 for
+ * compatibility reasons.
+ */
+ if (ecc->mode == NAND_ECC_NONE) {
+ mtd_set_ooblayout(mtd,
+ &nand_ooblayout_lp_ops);
+ break;
+ }
+
WARN(1, "No oob scheme defined for oobsize %d\n",
mtd->oobsize);
ret = -EINVAL;
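
Several hunks above (au1550nd, diskonchip, hisi504, mxc_nand, nuc900 and the two nand_command variants here) replace per-driver chip-size thresholds with the new NAND_ROW_ADDR_3 flag, which nand_base.c now sets when chip_shift - page_shift > 16, i.e. when the chip holds more than 65536 pages so the row address no longer fits in two 8-bit address cycles. A small sketch of that arithmetic with assumed geometries, not specific devices:

#include <linux/types.h>

/* 256 MiB chip, 2 KiB pages: 2^(28 - 11) = 2^17 pages -> third row cycle needed */
/* 32 MiB chip, 512 B pages:  2^(25 - 9)  = 2^16 pages -> two cycles still suffice */
static bool needs_third_row_cycle(unsigned int chip_shift, unsigned int page_shift)
{
	return chip_shift - page_shift > 16;
}
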
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 7bb4d2ea9342..af5b32c9a791 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -154,7 +154,7 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
if (page_addr != -1) {
write_addr_reg(nand, page_addr);
- if (chip->chipsize > (128 << 20)) {
+ if (chip->options & NAND_ROW_ADDR_3) {
write_addr_reg(nand, page_addr >> 8);
write_addr_reg(nand, page_addr >> 16 | ENDADDR);
} else {
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 54540c8fa1a2..dad438c4906a 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1133,129 +1133,172 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
0x97, 0x79, 0xe5, 0x24, 0xb5};
/**
- * omap_calculate_ecc_bch - Generate bytes of ECC bytes
+ * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
* @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer
+ * @i: The sector number (for a multi sector page)
*
- * Support calculating of BCH4/8 ecc vectors for the page
+ * Support calculating of BCH4/8/16 ECC vectors for one sector
+ * within a page. Sector number is in @i.
*/
-static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
- const u_char *dat, u_char *ecc_calc)
+static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc, int i)
{
struct omap_nand_info *info = mtd_to_omap(mtd);
int eccbytes = info->nand.ecc.bytes;
struct gpmc_nand_regs *gpmc_regs = &info->reg;
u8 *ecc_code;
- unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
+ unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
u32 val;
- int i, j;
+ int j;
+
+ ecc_code = ecc_calc;
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH8_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
+ bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
+ *ecc_code++ = (bch_val4 & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val3 & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val2 & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val1 & 0xFF);
+ break;
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH4_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val2 & 0xF) << 4) |
+ ((bch_val1 >> 28) & 0xF);
+ *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val1 & 0xF) << 4);
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ val = readl(gpmc_regs->gpmc_bch_result6[i]);
+ ecc_code[0] = ((val >> 8) & 0xFF);
+ ecc_code[1] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result5[i]);
+ ecc_code[2] = ((val >> 24) & 0xFF);
+ ecc_code[3] = ((val >> 16) & 0xFF);
+ ecc_code[4] = ((val >> 8) & 0xFF);
+ ecc_code[5] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result4[i]);
+ ecc_code[6] = ((val >> 24) & 0xFF);
+ ecc_code[7] = ((val >> 16) & 0xFF);
+ ecc_code[8] = ((val >> 8) & 0xFF);
+ ecc_code[9] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result3[i]);
+ ecc_code[10] = ((val >> 24) & 0xFF);
+ ecc_code[11] = ((val >> 16) & 0xFF);
+ ecc_code[12] = ((val >> 8) & 0xFF);
+ ecc_code[13] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result2[i]);
+ ecc_code[14] = ((val >> 24) & 0xFF);
+ ecc_code[15] = ((val >> 16) & 0xFF);
+ ecc_code[16] = ((val >> 8) & 0xFF);
+ ecc_code[17] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result1[i]);
+ ecc_code[18] = ((val >> 24) & 0xFF);
+ ecc_code[19] = ((val >> 16) & 0xFF);
+ ecc_code[20] = ((val >> 8) & 0xFF);
+ ecc_code[21] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result0[i]);
+ ecc_code[22] = ((val >> 24) & 0xFF);
+ ecc_code[23] = ((val >> 16) & 0xFF);
+ ecc_code[24] = ((val >> 8) & 0xFF);
+ ecc_code[25] = ((val >> 0) & 0xFF);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* ECC scheme specific syndrome customizations */
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back
+ */
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch4_polynomial[j];
+ break;
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* Set 8th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back
+ */
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch8_polynomial[j];
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ /* Set 14th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Support calculating of BCH4/8/16 ECC vectors for one sector. This is used
+ * when SW based correction is required as ECC is required for one sector
+ * at a time.
+ */
+static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc)
+{
+ return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0);
+}
+
+/**
+ * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Support calculating of BCH4/8/16 ecc vectors for the entire page in one go.
+ */
+static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc)
+{
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ int eccbytes = info->nand.ecc.bytes;
+ unsigned long nsectors;
+ int i, ret;
nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
for (i = 0; i < nsectors; i++) {
- ecc_code = ecc_calc;
- switch (info->ecc_opt) {
- case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
- case OMAP_ECC_BCH8_CODE_HW:
- bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
- bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
- bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
- bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
- *ecc_code++ = (bch_val4 & 0xFF);
- *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
- *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
- *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
- *ecc_code++ = (bch_val3 & 0xFF);
- *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
- *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
- *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
- *ecc_code++ = (bch_val2 & 0xFF);
- *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
- *ecc_code++ = (bch_val1 & 0xFF);
- break;
- case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
- case OMAP_ECC_BCH4_CODE_HW:
- bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
- bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
- *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
- *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
- *ecc_code++ = ((bch_val2 & 0xF) << 4) |
- ((bch_val1 >> 28) & 0xF);
- *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
- *ecc_code++ = ((bch_val1 & 0xF) << 4);
- break;
- case OMAP_ECC_BCH16_CODE_HW:
- val = readl(gpmc_regs->gpmc_bch_result6[i]);
- ecc_code[0] = ((val >> 8) & 0xFF);
- ecc_code[1] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result5[i]);
- ecc_code[2] = ((val >> 24) & 0xFF);
- ecc_code[3] = ((val >> 16) & 0xFF);
- ecc_code[4] = ((val >> 8) & 0xFF);
- ecc_code[5] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result4[i]);
- ecc_code[6] = ((val >> 24) & 0xFF);
- ecc_code[7] = ((val >> 16) & 0xFF);
- ecc_code[8] = ((val >> 8) & 0xFF);
- ecc_code[9] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result3[i]);
- ecc_code[10] = ((val >> 24) & 0xFF);
- ecc_code[11] = ((val >> 16) & 0xFF);
- ecc_code[12] = ((val >> 8) & 0xFF);
- ecc_code[13] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result2[i]);
- ecc_code[14] = ((val >> 24) & 0xFF);
- ecc_code[15] = ((val >> 16) & 0xFF);
- ecc_code[16] = ((val >> 8) & 0xFF);
- ecc_code[17] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result1[i]);
- ecc_code[18] = ((val >> 24) & 0xFF);
- ecc_code[19] = ((val >> 16) & 0xFF);
- ecc_code[20] = ((val >> 8) & 0xFF);
- ecc_code[21] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result0[i]);
- ecc_code[22] = ((val >> 24) & 0xFF);
- ecc_code[23] = ((val >> 16) & 0xFF);
- ecc_code[24] = ((val >> 8) & 0xFF);
- ecc_code[25] = ((val >> 0) & 0xFF);
- break;
- default:
- return -EINVAL;
- }
-
- /* ECC scheme specific syndrome customizations */
- switch (info->ecc_opt) {
- case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
- /* Add constant polynomial to remainder, so that
- * ECC of blank pages results in 0x0 on reading back */
- for (j = 0; j < eccbytes; j++)
- ecc_calc[j] ^= bch4_polynomial[j];
- break;
- case OMAP_ECC_BCH4_CODE_HW:
- /* Set 8th ECC byte as 0x0 for ROM compatibility */
- ecc_calc[eccbytes - 1] = 0x0;
- break;
- case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
- /* Add constant polynomial to remainder, so that
- * ECC of blank pages results in 0x0 on reading back */
- for (j = 0; j < eccbytes; j++)
- ecc_calc[j] ^= bch8_polynomial[j];
- break;
- case OMAP_ECC_BCH8_CODE_HW:
- /* Set 14th ECC byte as 0x0 for ROM compatibility */
- ecc_calc[eccbytes - 1] = 0x0;
- break;
- case OMAP_ECC_BCH16_CODE_HW:
- break;
- default:
- return -EINVAL;
- }
+ ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
+ if (ret)
+ return ret;
- ecc_calc += eccbytes;
+ ecc_calc += eccbytes;
}
return 0;
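As a worked illustration of the sector loop just above (all sizes here are assumptions for the example, not values taken from the patch): with BCH8 on a 2048-byte page the GPMC engine is typically set up for four 512-byte sectors, so the 3-bit field at bits [6:4] of the ECC config register reads back 0x3, nsectors becomes 4, and ecc_calc advances by eccbytes after every sector. A minimal self-contained sketch of just that bookkeeping, with a fake stand-in for the per-sector hardware read-out:

	#include <stdio.h>

	/* Stand-in for the per-sector hardware read-out; in the driver this is
	 * _omap_calculate_ecc_bch(mtd, dat, ecc, sector). Values are fake. */
	static int fake_ecc_for_sector(unsigned char *ecc, int eccbytes, int sector)
	{
		for (int i = 0; i < eccbytes; i++)
			ecc[i] = (unsigned char)(sector * 16 + i);
		return 0;
	}

	int main(void)
	{
		const int eccbytes = 14;	/* assumed bytes per sector */
		const unsigned long nsectors = ((0x30 >> 4) & 0x7) + 1;	/* field 0x3 -> 4 */
		unsigned char ecc_calc[4 * 14];
		unsigned char *p = ecc_calc;

		for (unsigned long i = 0; i < nsectors; i++) {
			if (fake_ecc_for_sector(p, eccbytes, (int)i))
				return 1;
			p += eccbytes;	/* same advance as in the driver loop */
		}
		printf("filled %ld ECC bytes for %lu sectors\n",
		       (long)(p - ecc_calc), nsectors);
		return 0;
	}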
@@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->write_buf(mtd, buf, mtd->writesize);
/* Update ecc vector from GPMC result registers */
- chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
+ omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
@@ -1509,6 +1552,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
+ * omap_write_subpage_bch - BCH hardware ECC based subpage write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @offset: column address of subpage within the page
+ * @data_len: data length
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * OMAP optimized subpage write method.
+ */
+static int omap_write_subpage_bch(struct mtd_info *mtd,
+ struct nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf,
+ int oob_required, int page)
+{
+ u8 *ecc_calc = chip->buffers->ecccalc;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ int ecc_steps = chip->ecc.steps;
+ u32 start_step = offset / ecc_size;
+ u32 end_step = (offset + data_len - 1) / ecc_size;
+ int step, ret = 0;
+
+ /*
+ * Write the entire page in one go, which is optimal since the ECC
+ * is calculated by hardware. ECC is produced for every subpage, but
+ * we keep only the ones we actually wrote.
+ */
+
+ /* Enable GPMC ECC engine */
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ /* Write data */
+ chip->write_buf(mtd, buf, mtd->writesize);
+
+ for (step = 0; step < ecc_steps; step++) {
+ /* mask the ECC of untouched subpages by padding with 0xFF */
+ if (step < start_step || step > end_step)
+ memset(ecc_calc, 0xff, ecc_bytes);
+ else
+ ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
+
+ if (ret)
+ return ret;
+
+ buf += ecc_size;
+ ecc_calc += ecc_bytes;
+ }
+
+ /* copy the calculated ECC for the whole page to chip->oob_poi; */
+ /* this includes the masked value (0xFF) for unwritten subpages */
+ ecc_calc = chip->buffers->ecccalc;
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ /* write OOB buffer to NAND device */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
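To make the masking in omap_write_subpage_bch() above concrete, here is a small self-contained sketch of the step arithmetic only; the geometry (2048-byte page, 512-byte ECC step, 14 ECC bytes per step) is an assumption picked for the example:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const unsigned int ecc_size = 512, ecc_bytes = 14, ecc_steps = 4;
		const unsigned int offset = 1024, data_len = 512;	/* third subpage */
		const unsigned int start_step = offset / ecc_size;			/* 2 */
		const unsigned int end_step = (offset + data_len - 1) / ecc_size;	/* 2 */
		unsigned char ecc_calc[4 * 14];

		for (unsigned int step = 0; step < ecc_steps; step++) {
			int masked = step < start_step || step > end_step;

			/* 0xFF keeps untouched subpages erased; 0x00 merely
			 * stands in for the real hardware ECC in this sketch */
			memset(ecc_calc + step * ecc_bytes, masked ? 0xff : 0x00,
			       ecc_bytes);
			printf("step %u: %s\n", step,
			       masked ? "masked 0xFF" : "real ECC");
		}
		return 0;
	}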
+/**
* omap_read_page_bch - BCH ecc based page read function for entire page
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->ecc.total);
/* Calculate ecc bytes */
- chip->ecc.calculate(mtd, buf, ecc_calc);
+ omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
@@ -1588,8 +1697,7 @@ static bool is_elm_present(struct omap_nand_info *info,
return true;
}
-static bool omap2_nand_ecc_check(struct omap_nand_info *info,
- struct omap_nand_platform_data *pdata)
+static bool omap2_nand_ecc_check(struct omap_nand_info *info)
{
bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
@@ -1804,7 +1912,6 @@ static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
static int omap_nand_probe(struct platform_device *pdev)
{
struct omap_nand_info *info;
- struct omap_nand_platform_data *pdata = NULL;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
int err;
@@ -1821,29 +1928,10 @@ static int omap_nand_probe(struct platform_device *pdev)
info->pdev = pdev;
- if (dev->of_node) {
- if (omap_get_dt_info(dev, info))
- return -EINVAL;
- } else {
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "platform data missing\n");
- return -EINVAL;
- }
-
- info->gpmc_cs = pdata->cs;
- info->reg = pdata->reg;
- info->ecc_opt = pdata->ecc_opt;
- if (pdata->dev_ready)
- dev_info(&pdev->dev, "pdata->dev_ready is deprecated\n");
-
- info->xfer_type = pdata->xfer_type;
- info->devsize = pdata->devsize;
- info->elm_of_node = pdata->elm_of_node;
- info->flash_bbt = pdata->flash_bbt;
- }
+ err = omap_get_dt_info(dev, info);
+ if (err)
+ return err;
- platform_set_drvdata(pdev, info);
info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
if (!info->ops) {
dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
@@ -2002,7 +2090,7 @@ static int omap_nand_probe(struct platform_device *pdev)
goto return_error;
}
- if (!omap2_nand_ecc_check(info, pdata)) {
+ if (!omap2_nand_ecc_check(info)) {
err = -EINVAL;
goto return_error;
}
@@ -2044,7 +2132,7 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 4;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
oobbytes_per_step = nand_chip->ecc.bytes + 1;
@@ -2066,9 +2154,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 4;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
+ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes;
@@ -2087,7 +2175,7 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 8;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
oobbytes_per_step = nand_chip->ecc.bytes + 1;
@@ -2109,9 +2197,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 8;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
+ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes;
@@ -2131,9 +2219,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 16;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
+ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes;
@@ -2167,10 +2255,9 @@ scan_tail:
if (err)
goto return_error;
- if (dev->of_node)
- mtd_device_register(mtd, NULL, 0);
- else
- mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ goto return_error;
platform_set_drvdata(pdev, mtd);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 85cff68643e0..90b9a9ccbe60 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -30,6 +30,8 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
#define NAND_STOP_DELAY msecs_to_jiffies(40)
@@ -45,6 +47,10 @@
*/
#define INIT_BUFFER_SIZE 2048
+/* System control register and bit to enable NAND on some SoCs */
+#define GENCONF_SOC_DEVICE_MUX 0x208
+#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
+
/* registers and bit definitions */
#define NDCR (0x00) /* Control register */
#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
@@ -174,6 +180,7 @@ enum {
enum pxa3xx_nand_variant {
PXA3XX_NAND_VARIANT_PXA,
PXA3XX_NAND_VARIANT_ARMADA370,
+ PXA3XX_NAND_VARIANT_ARMADA_8K,
};
struct pxa3xx_nand_host {
@@ -425,6 +432,10 @@ static const struct of_device_id pxa3xx_nand_dt_ids[] = {
.compatible = "marvell,armada370-nand",
.data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
},
+ {
+ .compatible = "marvell,armada-8k-nand",
+ .data = (void *)PXA3XX_NAND_VARIANT_ARMADA_8K,
+ },
{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
@@ -825,7 +836,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
info->retcode = ERR_UNCORERR;
if (status & NDSR_CORERR) {
info->retcode = ERR_CORERR;
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
+ if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
info->ecc_bch)
info->ecc_err_cnt = NDSR_ERR_CNT(status);
else
@@ -888,7 +900,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
nand_writel(info, NDCB0, info->ndcb2);
/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
nand_writel(info, NDCB0, info->ndcb3);
}
@@ -1671,7 +1684,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
chip->options |= NAND_BUSWIDTH_16;
/* Device detection must be done with ECC disabled */
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
nand_writel(info, NDECCCTRL, 0x0);
if (pdata->flash_bbt)
@@ -1709,7 +1723,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
* (aka splitted) command handling,
*/
if (mtd->writesize > PAGE_CHUNK_SIZE) {
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
chip->cmdfunc = nand_cmdfunc_extended;
} else {
dev_err(&info->pdev->dev,
@@ -1928,6 +1943,24 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
if (!of_id)
return 0;
+ /*
+ * Some SoCs, such as the A7k/A8k, need the NAND controller to be
+ * enabled manually so that they do not depend on the bootloader
+ * having done it. This is done through a single bit in the System
+ * Functions registers.
+ */
+ if (pxa3xx_nand_get_variant(pdev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
+ struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
+ pdev->dev.of_node, "marvell,system-controller");
+ u32 reg;
+
+ if (IS_ERR(sysctrl_base))
+ return PTR_ERR(sysctrl_base);
+
+ regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg);
+ reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
+ regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
+ }
+
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
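The GENCONF_SOC_DEVICE_MUX handling a few lines above is a plain read-modify-write through the syscon regmap; the same effect could be had with regmap_update_bits(). A hedged sketch of that alternative, reusing the two defines added by this patch, a hypothetical helper name (enable_nfc_mux), and no error handling:

	#include <linux/bitops.h>
	#include <linux/regmap.h>

	#define GENCONF_SOC_DEVICE_MUX		0x208
	#define GENCONF_SOC_DEVICE_MUX_NFC_EN	BIT(0)

	/* Sketch only: same effect as the regmap_read()/regmap_write() pair in
	 * the hunk above, assuming sysctrl_base came from
	 * syscon_regmap_lookup_by_phandle(np, "marvell,system-controller").
	 */
	static int enable_nfc_mux(struct regmap *sysctrl_base)
	{
		return regmap_update_bits(sysctrl_base, GENCONF_SOC_DEVICE_MUX,
					  GENCONF_SOC_DEVICE_MUX_NFC_EN,
					  GENCONF_SOC_DEVICE_MUX_NFC_EN);
	}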
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index 3baddfc997d1..2656c1ac5646 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -22,6 +22,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
+#include <linux/dma/qcom_bam_dma.h>
/* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00
@@ -199,6 +200,15 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
*/
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
+/* Returns the NAND register physical address */
+#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
+
+/* Returns the dma address for reg read buffer */
+#define reg_buf_dma_addr(chip, vaddr) \
+ ((chip)->reg_read_dma + \
+ ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
+
+#define QPIC_PER_CW_CMD_ELEMENTS 32
#define QPIC_PER_CW_CMD_SGL 32
#define QPIC_PER_CW_DATA_SGL 8
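The reg_buf_dma_addr() macro above maps a position inside the CPU-visible reg_read_buf onto the matching position inside its DMA mapping. Below is a minimal sketch of that pointer arithmetic; the struct and function names are made up for illustration, and only the reg_read_buf/reg_read_dma field names mirror the driver:

	#include <linux/types.h>

	/* Hypothetical, reduced view of the controller state used by the macro. */
	struct reg_read_area {
		__le32		*reg_read_buf;	/* CPU address of the bounce buffer */
		dma_addr_t	reg_read_dma;	/* DMA address of the same buffer  */
	};

	/* Same arithmetic as reg_buf_dma_addr(chip, vaddr): the byte offset of
	 * vaddr within reg_read_buf is added to the DMA base address.
	 */
	static dma_addr_t reg_read_dma_of(struct reg_read_area *a, const void *vaddr)
	{
		return a->reg_read_dma +
		       ((const u8 *)vaddr - (const u8 *)a->reg_read_buf);
	}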
@@ -221,8 +231,13 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
/*
* This data type corresponds to the BAM transaction which will be used for all
* NAND transfers.
+ * @bam_ce - the array of BAM command elements
* @cmd_sgl - sgl for NAND BAM command pipe
* @data_sgl - sgl for NAND BAM consumer/producer pipe
+ * @bam_ce_pos - the index in bam_ce which is available for the next sgl
+ * @bam_ce_start - the index in bam_ce which marks the first command
+ * element of the current sgl. It is used to calculate the
+ * size of the current sgl
* @cmd_sgl_pos - current index in command sgl.
* @cmd_sgl_start - start index in command sgl.
* @tx_sgl_pos - current index in data sgl for tx.
@@ -231,8 +246,11 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
* @rx_sgl_start - start index in data sgl for rx.
*/
struct bam_transaction {
+ struct bam_cmd_element *bam_ce;
struct scatterlist *cmd_sgl;
struct scatterlist *data_sgl;
+ u32 bam_ce_pos;
+ u32 bam_ce_start;
u32 cmd_sgl_pos;
u32 cmd_sgl_start;
u32 tx_sgl_pos;
@@ -307,7 +325,8 @@ struct nandc_regs {
* controller
* @dev: parent device
* @base: MMIO base
- * @base_dma: physical base address of controller registers
+ * @base_phys: physical base address of controller registers
+ * @base_dma: dma base address of controller registers
* @core_clk: controller clock
* @aon_clk: another controller clock
*
@@ -340,6 +359,7 @@ struct qcom_nand_controller {
struct device *dev;
void __iomem *base;
+ phys_addr_t base_phys;
dma_addr_t base_dma;
struct clk *core_clk;
@@ -462,7 +482,8 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn_size =
sizeof(*bam_txn) + num_cw *
- ((sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
+ ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
+ (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
@@ -472,6 +493,10 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn = bam_txn_buf;
bam_txn_buf += sizeof(*bam_txn);
+ bam_txn->bam_ce = bam_txn_buf;
+ bam_txn_buf +=
+ sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
+
bam_txn->cmd_sgl = bam_txn_buf;
bam_txn_buf +=
sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
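The allocation above carves a single devm_kzalloc() region into the transaction struct followed by three per-codeword arrays (command elements, command sg entries, data sg entries). A small userspace sketch of the size arithmetic only, reusing the QPIC_PER_CW_* constants from this patch with assumed structure sizes and codeword count:

	#include <stdio.h>

	#define QPIC_PER_CW_CMD_ELEMENTS	32
	#define QPIC_PER_CW_CMD_SGL		32
	#define QPIC_PER_CW_DATA_SGL		8

	int main(void)
	{
		/* assumptions for the example only */
		const unsigned int num_cw = 4;	/* codewords per page */
		const unsigned int sz_ce = 16;	/* assumed sizeof(struct bam_cmd_element) */
		const unsigned int sz_sg = 32;	/* assumed sizeof(struct scatterlist) */
		const unsigned int sz_txn = 64;	/* assumed sizeof(struct bam_transaction) */

		unsigned int total = sz_txn + num_cw *
			(sz_ce * QPIC_PER_CW_CMD_ELEMENTS +
			 sz_sg * QPIC_PER_CW_CMD_SGL +
			 sz_sg * QPIC_PER_CW_DATA_SGL);

		printf("one allocation of %u bytes: txn header, %u command elements,\n"
		       "%u cmd sg entries, %u data sg entries\n",
		       total, num_cw * QPIC_PER_CW_CMD_ELEMENTS,
		       num_cw * QPIC_PER_CW_CMD_SGL, num_cw * QPIC_PER_CW_DATA_SGL);
		return 0;
	}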
@@ -489,6 +514,8 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
if (!nandc->props->is_bam)
return;
+ bam_txn->bam_ce_pos = 0;
+ bam_txn->bam_ce_start = 0;
bam_txn->cmd_sgl_pos = 0;
bam_txn->cmd_sgl_start = 0;
bam_txn->tx_sgl_pos = 0;
@@ -734,6 +761,66 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
}
/*
+ * Prepares the command descriptor for BAM DMA which will be used for NAND
+ * register reads and writes. The command descriptor requires the command
+ * to be expressed as command elements, so this function takes command
+ * elements from the bam transaction ce array and fills them with the
+ * required data. A single SGL can contain multiple command elements, so
+ * NAND_BAM_NEXT_SGL is used to start a separate SGL after the current
+ * command element.
+ */
+static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr,
+ int size, unsigned int flags)
+{
+ int bam_ce_size;
+ int i, ret;
+ struct bam_cmd_element *bam_ce_buffer;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
+
+ /* fill the command desc */
+ for (i = 0; i < size; i++) {
+ if (read)
+ bam_prep_ce(&bam_ce_buffer[i],
+ nandc_reg_phys(nandc, reg_off + 4 * i),
+ BAM_READ_COMMAND,
+ reg_buf_dma_addr(nandc,
+ (__le32 *)vaddr + i));
+ else
+ bam_prep_ce_le32(&bam_ce_buffer[i],
+ nandc_reg_phys(nandc, reg_off + 4 * i),
+ BAM_WRITE_COMMAND,
+ *((__le32 *)vaddr + i));
+ }
+
+ bam_txn->bam_ce_pos += size;
+
+ /* use the separate sgl after this command */
+ if (flags & NAND_BAM_NEXT_SGL) {
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
+ bam_ce_size = (bam_txn->bam_ce_pos -
+ bam_txn->bam_ce_start) *
+ sizeof(struct bam_cmd_element);
+ sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
+ bam_ce_buffer, bam_ce_size);
+ bam_txn->cmd_sgl_pos++;
+ bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+
+ if (flags & NAND_BAM_NWD) {
+ ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_FENCE |
+ DMA_PREP_CMD);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
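To illustrate the bam_ce_pos/bam_ce_start bookkeeping above: two register blocks of 3 and 2 registers queued back to back, with NAND_BAM_NEXT_SGL set only on the second call, end up as 5 command elements described by a single sg entry. A self-contained userspace model of just that counting (not driver code; register names in the comments are examples):

	#include <stdio.h>

	struct txn { unsigned int ce_pos, ce_start, sgl_pos; };

	/* Model of the bookkeeping only: "size" command elements are consumed,
	 * and when next_sgl is set the [ce_start, ce_pos) range is closed off
	 * into one sg entry, much like sg_set_buf() does in the driver.
	 */
	static void queue_regs(struct txn *t, unsigned int size, int next_sgl)
	{
		t->ce_pos += size;
		if (next_sgl) {
			printf("sgl %u covers %u command elements\n",
			       t->sgl_pos, t->ce_pos - t->ce_start);
			t->sgl_pos++;
			t->ce_start = t->ce_pos;
		}
	}

	int main(void)
	{
		struct txn t = { 0, 0, 0 };

		queue_regs(&t, 3, 0);	/* e.g. a NAND_FLASH_CMD block, no flag      */
		queue_regs(&t, 2, 1);	/* e.g. exec block, with NAND_BAM_NEXT_SGL   */
		return 0;
	}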
+/*
* Prepares the data descriptor for BAM DMA which will be used for NAND
* data reads and writes.
*/
@@ -851,19 +938,22 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
{
bool flow_control = false;
void *vaddr;
- int size;
- if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
- flow_control = true;
+ vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+ nandc->reg_read_pos += num_regs;
if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
first = dev_cmd_reg_addr(nandc, first);
- size = num_regs * sizeof(u32);
- vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
- nandc->reg_read_pos += num_regs;
+ if (nandc->props->is_bam)
+ return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+ flow_control = true;
- return prep_adm_dma_desc(nandc, true, first, vaddr, size, flow_control);
+ return prep_adm_dma_desc(nandc, true, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
}
/*
@@ -880,13 +970,9 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
bool flow_control = false;
struct nandc_regs *regs = nandc->regs;
void *vaddr;
- int size;
vaddr = offset_to_nandc_reg(regs, first);
- if (first == NAND_FLASH_CMD)
- flow_control = true;
-
if (first == NAND_ERASED_CW_DETECT_CFG) {
if (flags & NAND_ERASED_CW_SET)
vaddr = &regs->erased_cw_detect_cfg_set;
@@ -903,10 +989,15 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
- size = num_regs * sizeof(u32);
+ if (nandc->props->is_bam)
+ return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_FLASH_CMD)
+ flow_control = true;
- return prep_adm_dma_desc(nandc, false, first, vaddr, size,
- flow_control);
+ return prep_adm_dma_desc(nandc, false, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
}
/*
@@ -1170,7 +1261,8 @@ static int submit_descs(struct qcom_nand_controller *nandc)
}
if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
- r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 0);
+ r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_CMD);
if (r)
return r;
}
@@ -2705,6 +2797,7 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (IS_ERR(nandc->base))
return PTR_ERR(nandc->base);
+ nandc->base_phys = res->start;
nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
nandc->core_clk = devm_clk_get(dev, "core");
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index e7f3c98487e6..3c5008a4f5f3 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -1094,14 +1094,11 @@ MODULE_DEVICE_TABLE(of, of_flctl_match);
static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{
- const struct of_device_id *match;
- struct flctl_soc_config *config;
+ const struct flctl_soc_config *config;
struct sh_flctl_platform_data *pdata;
- match = of_match_device(of_flctl_match, dev);
- if (match)
- config = (struct flctl_soc_config *)match->data;
- else {
+ config = of_device_get_match_data(dev);
+ if (!config) {
dev_err(dev, "%s: no OF configuration attached\n", __func__);
return NULL;
}