Message-Id: <20251205-winbond-v6-18-rc1-cont-read-v1-6-01bc48631c73@bootlin.com>
Date: Fri, 05 Dec 2025 20:38:57 +0100
From: Miquel Raynal <miquel.raynal@...tlin.com>
To: Mark Brown <broonie@...nel.org>, Richard Weinberger <richard@....at>,
Vignesh Raghavendra <vigneshr@...com>, Michael Walle <mwalle@...nel.org>
Cc: Tudor Ambarus <tudor.ambarus@...aro.org>,
Pratyush Yadav <pratyush@...nel.org>,
Thomas Petazzoni <thomas.petazzoni@...tlin.com>,
Steam Lin <STLin2@...bond.com>, Santhosh Kumar K <s-k6@...com>,
linux-spi@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-mtd@...ts.infradead.org, Miquel Raynal <miquel.raynal@...tlin.com>
Subject: [PATCH RFC 6/8] mtd: spinand: Use secondary ops for continuous
reads
In case a chip supports continuous reads but uses a slightly different
cache operation for these, it may provide a secondary operation template
which will be used only during continuous cache read operations.

From a vendor driver point of view, enabling this feature implies
providing a new set of templates for these continuous read
operations. The core will automatically pick the fastest variant,
depending on the hardware capabilities.
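
For illustration, a vendor table entry opting into the feature could
look as follows. This is a minimal sketch, not taken from this patch:
the chip name, ID bytes, geometry, OOB layout and the *_variants lists
are placeholders, each list being built with SPINAND_OP_VARIANTS() as
usual:

	/*
	 * SPINAND_INFO_OP_VARIANTS_WITH_CONT() is the helper added by
	 * this patch: the fourth argument carries the continuous read
	 * variants, from which the core picks the fastest supported one.
	 */
	SPINAND_INFO("EXAMPLE-CHIP",
		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xef, 0x42),
		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
		     NAND_ECCREQ(1, 512),
		     SPINAND_INFO_OP_VARIANTS_WITH_CONT(&read_cache_variants,
							&write_cache_variants,
							&update_cache_variants,
							&cont_read_cache_variants),
		     0,
		     SPINAND_ECCINFO(&example_ooblayout, NULL)),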
Signed-off-by: Miquel Raynal <miquel.raynal@...tlin.com>
---
drivers/mtd/nand/spi/core.c | 51 ++++++++++++++++++++++++++++++++++++++++++++-
include/linux/mtd/spinand.h | 12 +++++++++++
2 files changed, 62 insertions(+), 1 deletion(-)
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 209146f21326..37a0d0373942 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -399,6 +399,11 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
rdesc = spinand->dirmaps[req->pos.plane].rdesc;
+ if (spinand->op_templates->cont_read_cache && req->continuous)
+ rdesc->info.op_tmpl = &rdesc->info.secondary_op_tmpl;
+ else
+ rdesc->info.op_tmpl = &rdesc->info.primary_op_tmpl;
+
if (nand->ecc.engine->integration == NAND_ECC_ENGINE_INTEGRATION_PIPELINED &&
req->mode != MTD_OPS_RAW)
rdesc->info.op_tmpl->data.ecc = true;
@@ -1123,6 +1128,7 @@ static struct spi_mem_dirmap_desc *spinand_create_rdesc(
* its spi controller, use regular reading
*/
spinand->cont_read_possible = false;
+ memset(&info->secondary_op_tmpl, 0, sizeof(info->secondary_op_tmpl));
info->length = nanddev_page_size(nand) +
nanddev_per_page_oobsize(nand);
@@ -1139,11 +1145,24 @@ static int spinand_create_dirmap(struct spinand_device *spinand,
struct nand_device *nand = spinand_to_nand(spinand);
struct spi_mem_dirmap_info info = { 0 };
struct spi_mem_dirmap_desc *desc;
- bool enable_ecc = false;
+ bool enable_ecc = false, secondary_op = false;
if (nand->ecc.engine->integration == NAND_ECC_ENGINE_INTEGRATION_PIPELINED)
enable_ecc = true;
+ if (spinand->cont_read_possible && spinand->op_templates->cont_read_cache)
+ secondary_op = true;
+
+ /*
+ * Continuous read implies that only the main data is retrieved, backed
+ * by an on-die ECC engine. It is not possible to use a pipelined ECC
+ * engine with continuous read.
+ */
+ if (enable_ecc && secondary_op) {
+ secondary_op = false;
+ spinand->cont_read_possible = false;
+ }
+
/* The plane number is passed in MSB just above the column address */
info.offset = plane << fls(nand->memorg.pagesize);
@@ -1161,6 +1180,10 @@ static int spinand_create_dirmap(struct spinand_device *spinand,
/* Read descriptor */
info.primary_op_tmpl = *spinand->op_templates->read_cache;
info.primary_op_tmpl.data.ecc = enable_ecc;
+ if (secondary_op) {
+ info.secondary_op_tmpl = *spinand->op_templates->cont_read_cache;
+ info.secondary_op_tmpl.data.ecc = enable_ecc;
+ }
desc = spinand_create_rdesc(spinand, &info);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -1505,6 +1528,27 @@ int spinand_match_and_init(struct spinand_device *spinand,
if (ret)
return ret;
+ op = spinand_select_op_variant(spinand, SSDR,
+ info->op_variants.cont_read_cache);
+ if (op) {
+ const struct spi_mem_op *read_op = spinand->ssdr_op_templates.read_cache;
+
+ /*
+ * The fastest continuous read variant may not use the same bus
+ * configuration as the fastest read from cache variant. In this
+ * case, prefer the regular read from cache variant and disable
+ * continuous reads.
+ */
+ if (read_op->cmd.buswidth != op->cmd.buswidth ||
+ read_op->cmd.dtr != op->cmd.dtr ||
+ read_op->addr.buswidth != op->addr.buswidth ||
+ read_op->addr.dtr != op->addr.dtr ||
+ read_op->data.buswidth != op->data.buswidth ||
+ read_op->data.dtr != op->data.dtr)
+ spinand->cont_read_possible = false;
+ else
+ spinand->ssdr_op_templates.cont_read_cache = op;
+ }
+
/* I/O variants selection with octo-spi DDR commands (optional) */
ret = spinand_init_odtr_instruction_set(spinand);
@@ -1527,6 +1571,11 @@ int spinand_match_and_init(struct spinand_device *spinand,
info->op_variants.update_cache);
spinand->odtr_op_templates.update_cache = op;
+ op = spinand_select_op_variant(spinand, ODTR,
+ info->op_variants.cont_read_cache);
+ if (op)
+ spinand->odtr_op_templates.cont_read_cache = op;
+
return 0;
}
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 5ca1181048f7..5ec7d756df8b 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -573,6 +573,7 @@ enum spinand_bus_interface {
* @op_variants.read_cache: variants of the read-cache operation
* @op_variants.write_cache: variants of the write-cache operation
* @op_variants.update_cache: variants of the update-cache operation
+ * @op_variants.cont_read_cache: variants of the continuous read-cache operation
* @vendor_ops: vendor specific operations
* @select_target: function used to select a target/die. Required only for
* multi-die chips
@@ -597,6 +598,7 @@ struct spinand_info {
const struct spinand_op_variants *read_cache;
const struct spinand_op_variants *write_cache;
const struct spinand_op_variants *update_cache;
+ const struct spinand_op_variants *cont_read_cache;
} op_variants;
const struct spinand_op_variants *vendor_ops;
int (*select_target)(struct spinand_device *spinand,
@@ -626,6 +628,14 @@ struct spinand_info {
.update_cache = __update, \
}
+#define SPINAND_INFO_OP_VARIANTS_WITH_CONT(__read, __write, __update, __cont_read) \
+ { \
+ .read_cache = __read, \
+ .write_cache = __write, \
+ .update_cache = __update, \
+ .cont_read_cache = __cont_read, \
+ }
+
#define SPINAND_INFO_VENDOR_OPS(__ops) \
.vendor_ops = __ops
@@ -697,6 +707,7 @@ struct spinand_dirmap {
* @read_cache: read cache op template
* @write_cache: write cache op template
* @update_cache: update cache op template
+ * @cont_read_cache: continuous read cache op template (optional)
*/
struct spinand_mem_ops {
struct spi_mem_op reset;
@@ -711,6 +722,7 @@ struct spinand_mem_ops {
const struct spi_mem_op *read_cache;
const struct spi_mem_op *write_cache;
const struct spi_mem_op *update_cache;
+ const struct spi_mem_op *cont_read_cache;
};
/**
--
2.51.1