Message-Id: <1549955582-30346-6-git-send-email-yamada.masahiro@socionext.com>
Date:   Tue, 12 Feb 2019 16:12:57 +0900
From:   Masahiro Yamada <yamada.masahiro@...ionext.com>
To:     linux-mtd@...ts.infradead.org,
        Miquel Raynal <miquel.raynal@...tlin.com>
Cc:     Boris Brezillon <bbrezillon@...nel.org>,
        Masahiro Yamada <yamada.masahiro@...ionext.com>,
        Brian Norris <computersforpeace@...il.com>,
        linux-kernel@...r.kernel.org, Marek Vasut <marek.vasut@...il.com>,
        Richard Weinberger <richard@....at>,
        David Woodhouse <dwmw2@...radead.org>
Subject: [PATCH v2 05/10] mtd: rawnand: denali: use more precise timeout for NAND_OP_WAITRDY_INSTR

Currently, wait_for_completion_timeout() is always passed the
hard-coded msecs_to_jiffies(1000). There is no specific reason for
1000 msec; it was simply chosen to be long enough.
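
For reference, the pattern in question looks roughly like this (a
minimal sketch, not the driver code; the helper name is made up for
illustration):

  #include <linux/completion.h>
  #include <linux/errno.h>
  #include <linux/jiffies.h>

  /* Illustration only: wait for an IRQ-signalled completion, with the
   * timeout given in milliseconds.  The interrupt handler is expected
   * to call complete() on @done. */
  static int example_wait_irq(struct completion *done, unsigned int timeout_ms)
  {
          unsigned long time_left;

          time_left = wait_for_completion_timeout(done,
                                                  msecs_to_jiffies(timeout_ms));

          return time_left ? 0 : -ETIMEDOUT;
  }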

With the exec_op() conversion, NAND_OP_WAITRDY_INSTR provides a more
precise timeout value, which depends on the preceding command. Let's
use it (+ 100 msec) to bail out earlier in the error case. The extra
100 msec is a margin in case the system is under heavy load.
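
To show where that per-instruction timeout comes from, here is a rough
sketch paraphrasing the nand_base.c read path (illustration only, not
part of this patch):

  #include <linux/mtd/rawnand.h>

  /* Illustration only: the core attaches a timeout derived from the
   * chip's timing data to each WAITRDY instruction, so a wait after
   * READ START is bounded by tR_max, a wait after PAGEPROG by
   * tPROG_max, and so on.  The driver receives this value as
   * instr->ctx.waitrdy.timeout_ms. */
  static void example_build_read_wait(const struct nand_sdr_timings *sdr)
  {
          struct nand_op_instr instrs[] = {
                  NAND_OP_CMD(NAND_CMD_READSTART, 0),
                  /* tR_max is in picoseconds; round it up to msec */
                  NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max), 0),
          };

          /* instrs[] would then be wrapped in a struct nand_operation
           * and passed to ->exec_op(). */
          (void)instrs;
  }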

I am still keeping the hard-coded values for the other, higher-level
hooks such as page_read, page_write, etc. We know the values of tR and
tPROG, but we do not know the data transfer speed of the DMA engine.

Signed-off-by: Masahiro Yamada <yamada.masahiro@...ionext.com>
---

Changes in v2:
  - Add an extra 100 msec to the wait period in case the system is under load

 drivers/mtd/nand/raw/denali.c | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index bd7df25..7050b1f 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -176,7 +176,7 @@ static void denali_reset_irq(struct denali_nand_info *denali)
 }
 
 static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
-				    uint32_t irq_mask)
+				    u32 irq_mask, unsigned int timeout_ms)
 {
 	unsigned long time_left, flags;
 	uint32_t irq_status;
@@ -195,8 +195,11 @@ static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
 	reinit_completion(&denali->complete);
 	spin_unlock_irqrestore(&denali->irq_lock, flags);
 
+	/* Prolong the IRQ wait time in case the system is under heavy load. */
+	timeout_ms += 100;
+
 	time_left = wait_for_completion_timeout(&denali->complete,
-						msecs_to_jiffies(1000));
+						msecs_to_jiffies(timeout_ms));
 	if (!time_left) {
 		dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
 			irq_mask);
@@ -349,7 +352,7 @@ static int denali_sw_ecc_fixup(struct nand_chip *chip,
 	 * Once handle all ECC errors, controller will trigger an
 	 * ECC_TRANSACTION_DONE interrupt.
 	 */
-	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
+	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE, 1);
 	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
 		return -EIO;
 
@@ -421,7 +424,7 @@ static int denali_pio_read(struct denali_nand_info *denali, u32 *buf,
 	for (i = 0; i < size / 4; i++)
 		buf[i] = denali->host_read(denali, addr);
 
-	irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
+	irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC, 1);
 	if (!(irq_status & INTR__PAGE_XFER_INC))
 		return -EIO;
 
@@ -444,7 +447,9 @@ static int denali_pio_write(struct denali_nand_info *denali, const u32 *buf,
 		denali->host_write(denali, addr, buf[i]);
 
 	irq_status = denali_wait_for_irq(denali,
-				INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
+					 INTR__PROGRAM_COMP |
+					 INTR__PROGRAM_FAIL,
+					 1000);
 	if (!(irq_status & INTR__PROGRAM_COMP))
 		return -EIO;
 
@@ -501,7 +506,7 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
 	denali_reset_irq(denali);
 	denali->setup_dma(denali, dma_addr, page, write);
 
-	irq_status = denali_wait_for_irq(denali, irq_mask);
+	irq_status = denali_wait_for_irq(denali, irq_mask, 1000);
 	if (!(irq_status & INTR__DMA_CMD_COMP))
 		ret = -EIO;
 	else if (irq_status & ecc_err_mask)
@@ -1168,12 +1173,13 @@ static void denali_exec_out16(struct denali_nand_info *denali, u32 type,
 				   buf[i + 1] << 16 | buf[i]);
 }
 
-static int denali_exec_waitrdy(struct denali_nand_info *denali)
+static int denali_exec_waitrdy(struct denali_nand_info *denali,
+			       unsigned int timeout_ms)
 {
 	u32 irq_stat;
 
 	/* R/B# pin transitioned from low to high? */
-	irq_stat = denali_wait_for_irq(denali, INTR__INT_ACT);
+	irq_stat = denali_wait_for_irq(denali, INTR__INT_ACT, timeout_ms);
 
 	/* Just in case nand_operation has multiple NAND_OP_WAITRDY_INSTR. */
 	denali_reset_irq(denali);
@@ -1212,7 +1218,8 @@ static int denali_exec_instr(struct nand_chip *chip,
 				   instr->ctx.data.len);
 		return 0;
 	case NAND_OP_WAITRDY_INSTR:
-		return denali_exec_waitrdy(denali);
+		return denali_exec_waitrdy(denali,
+					   instr->ctx.waitrdy.timeout_ms);
 	default:
 		WARN_ONCE(1, "unsupported NAND instruction type: %d\n",
 			  instr->type);
-- 
2.7.4
