Message-ID: <20250309084110.713285420@linutronix.de>
Date: Sun, 9 Mar 2025 09:41:56 +0100 (CET)
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Marc Zyngier <maz@...nel.org>,
Manivannan Sadhasivam <manivannan.sadhasivam@...aro.org>,
"James E.J. Bottomley" <James.Bottomley@...senPartnership.com>,
"Martin K. Petersen" <martin.petersen@...cle.com>,
linux-scsi@...r.kernel.org,
Nishanth Menon <nm@...com>,
Tero Kristo <kristo@...nel.org>,
Santosh Shilimkar <ssantosh@...nel.org>,
Jon Mason <jdmason@...zu.us>,
Dave Jiang <dave.jiang@...el.com>,
Allen Hubbe <allenbh@...il.com>,
ntb@...ts.linux.dev,
Bjorn Helgaas <bhelgaas@...gle.com>,
linux-pci@...r.kernel.org,
Haiyang Zhang <haiyangz@...rosoft.com>,
Wei Liu <wei.liu@...nel.org>,
linux-hyperv@...r.kernel.org,
Wei Huang <wei.huang2@....com>
Subject: [patch 09/10] scsi: ufs: qcom: Remove the MSI descriptor abuse
The driver abuses the MSI descriptors for internal purposes. Aside from core
code and MSI providers, nothing needs to care about their existence. They have
been encapsulated with a lot of effort because this kind of abuse caused all
sorts of issues, including a maintainability nightmare.
Rewrite the code so it uses dedicated storage to hand the required
information to the interrupt handler.
No functional change intended.
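
For illustration only, a minimal sketch of the pattern under hypothetical
"foo" driver names (not part of this patch): each vector gets a small
per-vector cookie, the Linux interrupt number is looked up with
msi_get_virq(), and the cookie rather than a struct msi_desc is handed to
devm_request_irq() as the dev_id:

	/* Illustrative sketch only -- hypothetical "foo" driver */
	#include <linux/device.h>
	#include <linux/interrupt.h>
	#include <linux/msi.h>
	#include <linux/slab.h>

	struct foo_irq {
		unsigned int	irq;	/* Linux irq number from msi_get_virq() */
		unsigned int	idx;	/* hardware queue index */
		void		*drvdata;
	};

	static irqreturn_t foo_msi_handler(int irq, void *data)
	{
		struct foo_irq *fi = data;

		/* fi->idx identifies the queue; no MSI descriptor required */
		return IRQ_HANDLED;
	}

	static int foo_request_vectors(struct device *dev, void *drvdata, int nvec)
	{
		struct foo_irq *fi;
		int i, ret;

		fi = devm_kcalloc(dev, nvec, sizeof(*fi), GFP_KERNEL);
		if (!fi)
			return -ENOMEM;

		for (i = 0; i < nvec; i++) {
			fi[i].irq = msi_get_virq(dev, i);
			fi[i].idx = i;
			fi[i].drvdata = drvdata;

			/* The per-vector cookie is the dev_id, not a msi_desc */
			ret = devm_request_irq(dev, fi[i].irq, foo_msi_handler, 0,
					       "foo-msi", &fi[i]);
			if (ret)
				return ret;
		}
		return 0;
	}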
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Cc: Manivannan Sadhasivam <manivannan.sadhasivam@...aro.org>
Cc: "James E.J. Bottomley" <James.Bottomley@...senPartnership.com>
Cc: "Martin K. Petersen" <martin.petersen@...cle.com>
Cc: linux-scsi@...r.kernel.org
---
drivers/ufs/host/ufs-qcom.c | 77 ++++++++++++++++++++++----------------------
1 file changed, 40 insertions(+), 37 deletions(-)
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1782,15 +1782,19 @@ static void ufs_qcom_write_msi_msg(struc
ufshcd_mcq_config_esi(hba, msg);
}
+struct ufs_qcom_irq {
+ unsigned int irq;
+ unsigned int idx;
+ struct ufs_hba *hba;
+};
+
static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
{
- struct msi_desc *desc = data;
- struct device *dev = msi_desc_to_dev(desc);
- struct ufs_hba *hba = dev_get_drvdata(dev);
- u32 id = desc->msi_index;
- struct ufs_hw_queue *hwq = &hba->uhq[id];
+ struct ufs_qcom_irq *qi = data;
+ struct ufs_hba *hba = qi->hba;
+ struct ufs_hw_queue *hwq = &hba->uhq[qi->idx];
- ufshcd_mcq_write_cqis(hba, 0x1, id);
+ ufshcd_mcq_write_cqis(hba, 0x1, qi->idx);
ufshcd_mcq_poll_cqe_lock(hba, hwq);
return IRQ_HANDLED;
@@ -1799,8 +1803,7 @@ static irqreturn_t ufs_qcom_mcq_esi_hand
static int ufs_qcom_config_esi(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct msi_desc *desc;
- struct msi_desc *failed_desc = NULL;
+ struct ufs_qcom_irq *qi;
int nr_irqs, ret;
if (host->esi_enabled)
@@ -1811,47 +1814,47 @@ static int ufs_qcom_config_esi(struct uf
* 2. Poll queues do not need ESI.
*/
nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+ qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
+	if (!qi)
+		return -ENOMEM;
+
ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
ufs_qcom_write_msi_msg);
if (ret) {
dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
- return ret;
+ goto cleanup;
}
- msi_lock_descs(hba->dev);
- msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
- ret = devm_request_irq(hba->dev, desc->irq,
- ufs_qcom_mcq_esi_handler,
- IRQF_SHARED, "qcom-mcq-esi", desc);
+ for (int idx = 0; idx < nr_irqs; idx++) {
+ qi[idx].irq = msi_get_virq(hba->dev, idx);
+ qi[idx].idx = idx;
+ qi[idx].hba = hba;
+
+ ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
+ IRQF_SHARED, "qcom-mcq-esi", qi + idx);
if (ret) {
dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
- __func__, desc->irq, ret);
- failed_desc = desc;
- break;
+ __func__, qi[idx].irq, ret);
+ qi[idx].irq = 0;
+ goto cleanup;
}
}
- msi_unlock_descs(hba->dev);
- if (ret) {
- /* Rewind */
- msi_lock_descs(hba->dev);
- msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
- if (desc == failed_desc)
- break;
- devm_free_irq(hba->dev, desc->irq, hba);
- }
- msi_unlock_descs(hba->dev);
- platform_device_msi_free_irqs_all(hba->dev);
- } else {
- if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
- host->hw_ver.step == 0)
- ufshcd_rmwl(hba, ESI_VEC_MASK,
- FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
- REG_UFS_CFG3);
- ufshcd_mcq_enable_esi(hba);
- host->esi_enabled = true;
+ if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
+ host->hw_ver.step == 0) {
+ ufshcd_rmwl(hba, ESI_VEC_MASK,
+ FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
+ REG_UFS_CFG3);
}
-
+ ufshcd_mcq_enable_esi(hba);
+ host->esi_enabled = true;
+ return 0;
+
+cleanup:
+	for (int idx = 0; qi[idx].irq; idx++)
+		devm_free_irq(hba->dev, qi[idx].irq, qi + idx);
+ platform_device_msi_free_irqs_all(hba->dev);
+ devm_kfree(hba->dev, qi);
return ret;
}