author		Patrice Chotard <patrice.chotard@foss.st.com>	2021-05-18 18:27:53 +0200
committer	Mark Brown <broonie@kernel.org>	2021-06-03 14:04:57 +0100
commit		8941cd8d295e40f8ea1c0a5045d6d068b8e33eec (patch)
tree		7079a3f7ca62ac028fa42908f85d8781169ddf0b
parent		c955a0cc8a286e5da1ebb88c19201e9bab8c2422 (diff)
mtd: spinand: use the spi-mem poll status APIs
Make use of the spi-mem poll status APIs to let advanced controllers optimize
wait operations. This should also fix the high CPU usage on systems that don't
have a dedicated STATUS poll block logic.

Signed-off-by: Patrice Chotard <patrice.chotard@foss.st.com>
Signed-off-by: Christophe Kerello <christophe.kerello@foss.st.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Acked-by: Miquel Raynal <miquel.raynal@bootlin.com>
Link: https://lore.kernel.org/r/20210518162754.15940-3-patrice.chotard@foss.st.com
Signed-off-by: Mark Brown <broonie@kernel.org>
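For reference, the helper this patch switches to is introduced by the companion spi-mem change in the same series. The sketch below only illustrates the calling convention and the kind of software fallback the spi-mem core can provide when the controller has no dedicated STATUS poll block; it is not the actual drivers/spi/spi-mem.c implementation, and the exact prototype should be taken from that companion patch. Names prefixed with example_ are hypothetical.

/*
 * Illustrative software-fallback sketch. Controllers that implement a
 * hardware STATUS-poll hook can complete the wait without CPU
 * involvement; otherwise something along these lines reads the status
 * register and sleeps between attempts instead of busy-waiting, which
 * is what addresses the high CPU usage mentioned in the commit message.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/spi/spi-mem.h>

static int example_poll_status_sw_fallback(struct spi_mem *mem,
					   const struct spi_mem_op *op,
					   u16 mask, u16 match,
					   unsigned long initial_delay_us,
					   unsigned long polling_delay_us,
					   u16 timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	u8 *status = op->data.buf.in;
	int ret;

	/* Give the device time to start the operation before polling. */
	usleep_range(initial_delay_us, initial_delay_us + 10);

	do {
		/* Issue the status-read operation (GET FEATURE for SPI NAND). */
		ret = spi_mem_exec_op(mem, op);
		if (ret)
			return ret;

		/* Done once the masked status bits match the expected value. */
		if ((*status & mask) == match)
			return 0;

		/* Sleep between polls instead of spinning. */
		usleep_range(polling_delay_us, polling_delay_us + 10);
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}

In the spinand_wait() change below, the mask is STATUS_BUSY and the expected value is 0, so the wait completes as soon as the BUSY bit clears.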
-rw-r--r--	drivers/mtd/nand/spi/core.c	45
-rw-r--r--	include/linux/mtd/spinand.h	22
2 files changed, 54 insertions(+), 13 deletions(-)
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 17f63f95f4a2..3131fae0c715 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -473,20 +473,26 @@ static int spinand_erase_op(struct spinand_device *spinand,
return spi_mem_exec_op(spinand->spimem, &op);
}
-static int spinand_wait(struct spinand_device *spinand, u8 *s)
+static int spinand_wait(struct spinand_device *spinand,
+ unsigned long initial_delay_us,
+ unsigned long poll_delay_us,
+ u8 *s)
{
- unsigned long timeo = jiffies + msecs_to_jiffies(400);
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
+ spinand->scratchbuf);
u8 status;
int ret;
- do {
- ret = spinand_read_status(spinand, &status);
- if (ret)
- return ret;
+ ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
+ initial_delay_us,
+ poll_delay_us,
+ SPINAND_WAITRDY_TIMEOUT_MS);
+ if (ret)
+ return ret;
- if (!(status & STATUS_BUSY))
- goto out;
- } while (time_before(jiffies, timeo));
+ status = *spinand->scratchbuf;
+ if (!(status & STATUS_BUSY))
+ goto out;
/*
* Extra read, just in case the STATUS_READY bit has changed
@@ -526,7 +532,10 @@ static int spinand_reset_op(struct spinand_device *spinand)
if (ret)
return ret;
- return spinand_wait(spinand, NULL);
+ return spinand_wait(spinand,
+ SPINAND_RESET_INITIAL_DELAY_US,
+ SPINAND_RESET_POLL_DELAY_US,
+ NULL);
}
static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
@@ -549,7 +558,10 @@ static int spinand_read_page(struct spinand_device *spinand,
if (ret)
return ret;
- ret = spinand_wait(spinand, &status);
+ ret = spinand_wait(spinand,
+ SPINAND_READ_INITIAL_DELAY_US,
+ SPINAND_READ_POLL_DELAY_US,
+ &status);
if (ret < 0)
return ret;
@@ -585,7 +597,10 @@ static int spinand_write_page(struct spinand_device *spinand,
if (ret)
return ret;
- ret = spinand_wait(spinand, &status);
+ ret = spinand_wait(spinand,
+ SPINAND_WRITE_INITIAL_DELAY_US,
+ SPINAND_WRITE_POLL_DELAY_US,
+ &status);
if (!ret && (status & STATUS_PROG_FAILED))
return -EIO;
@@ -768,7 +783,11 @@ static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
- ret = spinand_wait(spinand, &status);
+ ret = spinand_wait(spinand,
+ SPINAND_ERASE_INITIAL_DELAY_US,
+ SPINAND_ERASE_POLL_DELAY_US,
+ &status);
+
if (!ret && (status & STATUS_ERASE_FAILED))
ret = -EIO;
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 6bb92f26833e..6988956b8492 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -170,6 +170,28 @@ struct spinand_op;
struct spinand_device;
#define SPINAND_MAX_ID_LEN 4
+/*
+ * For erase, write and read operation, we got the following timings :
+ * tBERS (erase) 1ms to 4ms
+ * tPROG 300us to 400us
+ * tREAD 25us to 100us
+ * In order to minimize latency, the min value is divided by 4 for the
+ * initial delay, and dividing by 20 for the poll delay.
+ * For reset, 5us/10us/500us if the device is respectively
+ * reading/programming/erasing when the RESET occurs. Since we always
+ * issue a RESET when the device is IDLE, 5us is selected for both initial
+ * and poll delay.
+ */
+#define SPINAND_READ_INITIAL_DELAY_US 6
+#define SPINAND_READ_POLL_DELAY_US 5
+#define SPINAND_RESET_INITIAL_DELAY_US 5
+#define SPINAND_RESET_POLL_DELAY_US 5
+#define SPINAND_WRITE_INITIAL_DELAY_US 75
+#define SPINAND_WRITE_POLL_DELAY_US 15
+#define SPINAND_ERASE_INITIAL_DELAY_US 250
+#define SPINAND_ERASE_POLL_DELAY_US 50
+
+#define SPINAND_WAITRDY_TIMEOUT_MS 400
/**
* struct spinand_id - SPI NAND id structure
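As a quick check of the derivation described in the new spinand.h comment (minimum datasheet timing divided by 4 for the initial delay and by 20 for the poll delay): tPROG 300us gives 75us/15us and tBERS 1ms gives 250us/50us, matching the write and erase defines exactly, while tREAD 25us gives the 6us read initial delay, with the read poll delay set to 5us. The snippet below restates that arithmetic as compile-time checks; the EXAMPLE_* constants are illustrative and not part of the patch.

/* Illustrative compile-time check of the delay derivation, not patch code. */
#define EXAMPLE_TPROG_MIN_US	300	/* tPROG minimum */
#define EXAMPLE_TBERS_MIN_US	1000	/* tBERS minimum (1 ms) */

_Static_assert(EXAMPLE_TPROG_MIN_US / 4 == 75,  "SPINAND_WRITE_INITIAL_DELAY_US");
_Static_assert(EXAMPLE_TPROG_MIN_US / 20 == 15, "SPINAND_WRITE_POLL_DELAY_US");
_Static_assert(EXAMPLE_TBERS_MIN_US / 4 == 250, "SPINAND_ERASE_INITIAL_DELAY_US");
_Static_assert(EXAMPLE_TBERS_MIN_US / 20 == 50, "SPINAND_ERASE_POLL_DELAY_US");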