Message-Id: <20210106214109.44041-3-jaegeuk@kernel.org>
Date:   Wed,  6 Jan 2021 13:41:09 -0800
From:   Jaegeuk Kim <jaegeuk@...nel.org>
To:     linux-kernel@...r.kernel.org, linux-scsi@...r.kernel.org,
        kernel-team@...roid.com
Cc:     cang@...eaurora.org, alim.akhtar@...sung.com, avri.altman@....com,
        bvanassche@....org, martin.petersen@...cle.com,
        stanley.chu@...iatek.com, Jaegeuk Kim <jaegeuk@...gle.com>,
        Jaegeuk Kim <jaegeuk@...nel.org>
Subject: [PATCH v3 2/2] scsi: ufs: handle LINERESET with correct tm_cmd

From: Jaegeuk Kim <jaegeuk@...gle.com>

This fixes the following warnings, caused by incorrect reserved tag usage in __ufshcd_issue_tm_cmd():

WARNING: CPU: 7 PID: 7 at block/blk-core.c:630 blk_get_request+0x68/0x70
WARNING: CPU: 4 PID: 157 at block/blk-mq-tag.c:82 blk_mq_get_tag+0x438/0x46c
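
For context, a BLK_MQ_REQ_RESERVED allocation can only succeed when the tag
set reserves at least one tag, and BLK_MQ_REQ_NOWAIT makes it fail fast
instead of sleeping for a tag. A minimal sketch of that pattern follows; it
is illustrative only and not part of this patch, and
example_alloc_reserved_request() is a made-up name:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/*
 * Illustrative sketch: a BLK_MQ_REQ_RESERVED allocation needs the
 * tag set to have been created with .reserved_tags >= 1, which is
 * exactly what the ufshcd_init() hunk below adds.
 */
static int example_alloc_reserved_request(struct request_queue *q)
{
	struct request *req;

	req = blk_get_request(q, REQ_OP_DRV_OUT,
			      BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(req))
		return PTR_ERR(req);	/* no reserved tag available */

	/* ... use req->tag as the task management slot ... */

	blk_put_request(req);
	return 0;
}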

In addition, in ufshcd_err_handler(), we can avoid sending a tm_cmd to abort
outstanding commands by briefly waiting for IO completion, which prevents
timeouts such as:

__ufshcd_issue_tm_cmd: task management cmd 0x80 timed-out
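
The wait follows a simple bounded-polling pattern: yield between checks and
give up after LINERESET_IO_TIMEOUT_MS. A standalone sketch of that pattern
(illustrative only; example_wait_for_clear() is a made-up helper mirroring
the loop added below, with the host_lock handling omitted for brevity):

#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * Illustrative sketch: poll until *pending clears or timeout_ms
 * elapses, yielding the CPU between checks so completions can run.
 * The real loop in ufshcd_err_handler() also drops and reacquires
 * host_lock around the yield.
 */
static bool example_wait_for_clear(unsigned long *pending,
				   unsigned int timeout_ms)
{
	ktime_t start = ktime_get();

	while (*pending) {
		schedule();
		if (ktime_to_ms(ktime_sub(ktime_get(), start)) > timeout_ms)
			return false;	/* timed out; caller falls back to abort */
	}
	return true;
}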

Signed-off-by: Jaegeuk Kim <jaegeuk@...nel.org>
---
 drivers/scsi/ufs/ufshcd.c | 36 ++++++++++++++++++++++++++++++++----
 1 file changed, 32 insertions(+), 4 deletions(-)

diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 1678cec08b51..47fc8da3cbf9 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -44,6 +44,9 @@
 /* Query request timeout */
 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
 
+/* Timeout for waiting on IO completion after LINERESET */
+#define LINERESET_IO_TIMEOUT_MS			(30000) /* 30 sec */
+
 /* Task management command timeout */
 #define TM_CMD_TIMEOUT	100 /* msecs */
 
@@ -5899,6 +5902,8 @@ static void ufshcd_err_handler(struct work_struct *work)
 	 * check if power mode restore is needed.
 	 */
 	if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
+		ktime_t start = ktime_get();
+
 		hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
 		if (!hba->saved_uic_err)
 			hba->saved_err &= ~UIC_ERROR;
@@ -5906,6 +5911,20 @@ static void ufshcd_err_handler(struct work_struct *work)
 		if (ufshcd_is_pwr_mode_restore_needed(hba))
 			needs_restore = true;
 		spin_lock_irqsave(hba->host->host_lock, flags);
+		/* Wait for IO completion to avoid aborting IOs */
+		while (hba->outstanding_reqs) {
+			ufshcd_complete_requests(hba);
+			spin_unlock_irqrestore(hba->host->host_lock, flags);
+			schedule();
+			spin_lock_irqsave(hba->host->host_lock, flags);
+			if (ktime_to_ms(ktime_sub(ktime_get(), start)) >
+						LINERESET_IO_TIMEOUT_MS) {
+				dev_err(hba->dev, "%s: timeout, outstanding=0x%lx\n",
+					__func__, hba->outstanding_reqs);
+				break;
+			}
+		}
+
 		if (!hba->saved_err && !needs_restore)
 			goto skip_err_handling;
 	}
@@ -6302,9 +6321,13 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
 		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 	}
 
-	if (enabled_intr_status && retval == IRQ_NONE) {
-		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
-					__func__, intr_status);
+	if (enabled_intr_status && retval == IRQ_NONE &&
+				!ufshcd_eh_in_progress(hba)) {
+		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
+					__func__,
+					intr_status,
+					hba->ufs_stats.last_intr_status,
+					enabled_intr_status);
 		ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 	}
 
@@ -6348,7 +6371,11 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 	 * Even though we use wait_event() which sleeps indefinitely,
 	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
 	 */
-	req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
+	req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED |
+						BLK_MQ_REQ_NOWAIT);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
 	req->end_io_data = &wait;
 	free_slot = req->tag;
 	WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
@@ -9355,6 +9382,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
 	hba->tmf_tag_set = (struct blk_mq_tag_set) {
 		.nr_hw_queues	= 1,
+		.reserved_tags	= 1,
 		.queue_depth	= hba->nutmrs,
 		.ops		= &ufshcd_tmf_ops,
 		.flags		= BLK_MQ_F_NO_SCHED,
-- 
2.29.2.729.g45daf8777d-goog
