Message-ID: <a6ae9ca3f2aa41d3fe36ced88e58cac3813f8eb5.1663894792.git.quic_asutoshd@quicinc.com>
Date: Thu, 22 Sep 2022 18:05:21 -0700
From: Asutosh Das <quic_asutoshd@...cinc.com>
To: <mani@...nel.org>, <quic_nguyenb@...cinc.com>,
<quic_xiaosenh@...cinc.com>, <quic_cang@...cinc.com>,
<quic_nitirawa@...cinc.com>, <quic_rampraka@...cinc.com>,
<quic_richardp@...cinc.com>, <stanley.chu@...iatek.com>,
<adrian.hunter@...el.com>, <bvanassche@....org>,
<avri.altman@....com>, <beanhuo@...ron.com>,
<martin.petersen@...cle.com>
CC: <linux-scsi@...r.kernel.org>,
Asutosh Das <quic_asutoshd@...cinc.com>,
Alim Akhtar <alim.akhtar@...sung.com>,
"James E.J. Bottomley" <jejb@...ux.ibm.com>,
Matthias Brugger <matthias.bgg@...il.com>,
"Krzysztof Kozlowski" <krzysztof.kozlowski@...aro.org>,
open list <linux-kernel@...r.kernel.org>,
"moderated list:ARM/Mediatek SoC support"
<linux-arm-kernel@...ts.infradead.org>,
"moderated list:ARM/Mediatek SoC support"
<linux-mediatek@...ts.infradead.org>
Subject: [PATCH v1 14/16] ufs: core: mcq: Add completion support in poll
Complete CQEs in the poll path. Multiple poll completions may run
concurrently on different CPUs for the same completion queue, so
protect the completion processing with a per-queue spin lock.
Co-developed-by: Can Guo <quic_cang@...cinc.com>
Signed-off-by: Can Guo <quic_cang@...cinc.com>
Signed-off-by: Asutosh Das <quic_asutoshd@...cinc.com>
---
drivers/ufs/core/ufs-mcq.c | 13 +++++++++++++
drivers/ufs/core/ufshcd-priv.h | 2 ++
drivers/ufs/core/ufshcd.c | 7 +++++++
include/ufs/ufshcd.h | 2 ++
4 files changed, 24 insertions(+)
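
For illustration only (not part of the patch), below is a minimal
userspace sketch of the locking pattern this change introduces: the
head-advancing "nolock" drain is only ever called under a per-queue
spin lock, so pollers running on different CPUs cannot race on the
same completion queue. The fake_cq type and the poll_cqe_nolock() /
poll_cqe_lock() helpers are made up for this example and do not exist
in the kernel; threads stand in for CPUs. Build with -pthread.

/*
 * Userspace analogy of the pattern added by this patch: several
 * pollers drain the same completion queue, so the unlocked drain
 * step is wrapped in a spin lock (mirrors ufs_hw_queue.cq_lock).
 */
#include <pthread.h>
#include <stdio.h>

struct fake_cq {
	int entries;			/* pending completion entries */
	pthread_spinlock_t cq_lock;	/* stands in for hwq->cq_lock */
};

/* Unsafe on its own: assumes the caller already holds cq_lock. */
static int poll_cqe_nolock(struct fake_cq *cq)
{
	int done = cq->entries;

	cq->entries = 0;		/* "complete" everything pending */
	return done;
}

/* Safe wrapper, analogous to ufshcd_mcq_poll_cqe_lock(). */
static int poll_cqe_lock(struct fake_cq *cq)
{
	int done;

	pthread_spin_lock(&cq->cq_lock);
	done = poll_cqe_nolock(cq);
	pthread_spin_unlock(&cq->cq_lock);
	return done;
}

static void *poller(void *arg)
{
	struct fake_cq *cq = arg;

	printf("completed %d requests\n", poll_cqe_lock(cq));
	return NULL;
}

int main(void)
{
	struct fake_cq cq = { .entries = 8 };
	pthread_t t[2];

	pthread_spin_init(&cq.cq_lock, PTHREAD_PROCESS_PRIVATE);

	/* Two concurrent pollers; only one observes the 8 completions. */
	pthread_create(&t[0], NULL, poller, &cq);
	pthread_create(&t[1], NULL, poller, &cq);
	pthread_join(t[0], NULL);
	pthread_join(t[1], NULL);

	pthread_spin_destroy(&cq.cq_lock);
	return 0;
}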
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 46221c4..a8ca4c9 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -392,6 +392,18 @@ unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
return completed_reqs;
}
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
+{
+ unsigned long completed_reqs;
+
+ spin_lock(&hwq->cq_lock);
+ completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+ spin_unlock(&hwq->cq_lock);
+
+ return completed_reqs;
+}
+
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
{
struct ufs_hw_queue *hwq;
@@ -485,6 +497,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
hwq = &hba->uhq[i];
hwq->max_entries = hba->nutrs;
spin_lock_init(&hwq->sq_lock);
+ spin_lock_init(&hwq->cq_lock);
}
/* The very first HW queue is to serve device command */
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 417e2ca..6e9bec6 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -64,6 +64,8 @@ unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq);
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
struct request *req);
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq);
#define UFSHCD_MCQ_IO_QUEUE_OFFSET 1
#define SD_ASCII_STD true
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 0705861..44b6c0b 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -5426,6 +5426,13 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
struct ufs_hba *hba = shost_priv(shost);
unsigned long completed_reqs, flags;
u32 tr_doorbell;
+ struct ufs_hw_queue *hwq;
+
+ if (is_mcq_enabled(hba)) {
+ hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+
+ return ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ }
spin_lock_irqsave(&hba->outstanding_lock, flags);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 5731c01..efaa7c2 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -1062,6 +1062,7 @@ struct ufs_hba {
* @sq_lock: serialize submission queue access
* @cq_tail_slot: current slot to which CQ tail pointer is pointing
* @cq_head_slot: current slot to which CQ head pointer is pointing
+ * @cq_lock: Synchronize between multiple polling instances
*/
struct ufs_hw_queue {
void __iomem *mcq_sq_head;
@@ -1079,6 +1080,7 @@ struct ufs_hw_queue {
spinlock_t sq_lock;
u32 cq_tail_slot;
u32 cq_head_slot;
+ spinlock_t cq_lock;
};
static inline bool is_mcq_enabled(struct ufs_hba *hba)
--
2.7.4