[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20260122141331.239354-2-nitin.rawat@oss.qualcomm.com>
Date: Thu, 22 Jan 2026 19:43:29 +0530
From: Nitin Rawat <nitin.rawat@....qualcomm.com>
To: mani@...nel.org, James.Bottomley@...senPartnership.com,
martin.petersen@...cle.com
Cc: linux-arm-msm@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-scsi@...r.kernel.org, Nitin Rawat <nitin.rawat@....qualcomm.com>
Subject: [PATCH V1 1/3] ufs: ufs-qcom: Add UFS ESI CPU affinity support
Add Enhanced Shared Interrupt (ESI) CPU affinity support to improve
UFS performance on Qualcomm platforms.
By default, the IRQ core routes interrupts to a limited number of
cores while other cores remain idle. Enable dynamic interrupt
affinity adjustment for better performance tuning by distributing
ESI interrupts across all online CPUs in round-robin
fashion.
This reduces CPU contention and enables better performance optimization
on Qualcomm UFS controllers by utilizing all available online CPUs.
Signed-off-by: Nitin Rawat <nitin.rawat@....qualcomm.com>
---
drivers/ufs/host/ufs-qcom.c | 50 +++++++++++++++++++++++++++++++++++++
1 file changed, 50 insertions(+)
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 8ebee0cc5313..c43bb75d208c 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -2070,6 +2070,55 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
return IRQ_HANDLED;
}
+/**
+ * ufs_qcom_set_esi_affinity - Set CPU affinity hints for ESI interrupts
+ * @hba: UFS host controller instance
+ *
+ * Sets CPU affinity hints for ESI interrupts to distribute them across
+ * online CPUs in round-robin fashion for better performance.
+ */
+static void ufs_qcom_set_esi_affinity(struct ufs_hba *hba)
+{
+	struct msi_desc *desc;
+	unsigned int cpu = cpumask_first(cpu_online_mask);
+	int ret;
+
+	/*
+	 * Walk the MSI descriptors in a single locked section. Pre-counting
+	 * the descriptors in a separate lock/unlock pass is redundant (the
+	 * loop already terminates at the end of the list) and the count
+	 * could go stale between the two sections. Use the public
+	 * msi_lock_descs() API rather than the internal __ variant.
+	 */
+	msi_lock_descs(hba->dev);
+	msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
+		/*
+		 * 'cpu' is always taken from cpu_online_mask, so no extra
+		 * validity check against the online mask is needed here.
+		 */
+		ret = irq_set_affinity_hint(desc->irq, get_cpu_mask(cpu));
+		if (ret < 0)
+			dev_err(hba->dev, "Failed to set affinity hint to CPU %u for ESI IRQ %d, err = %d\n",
+				cpu, desc->irq, ret);
+
+		/* Advance round-robin; wrap back to the first online CPU */
+		cpu = cpumask_next(cpu, cpu_online_mask);
+		if (cpu >= nr_cpu_ids)
+			cpu = cpumask_first(cpu_online_mask);
+	}
+	msi_unlock_descs(hba->dev);
+}
+
static int ufs_qcom_config_esi(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -2122,6 +2171,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
REG_UFS_CFG3);
}
ufshcd_mcq_enable_esi(hba);
+ ufs_qcom_set_esi_affinity(hba);
host->esi_enabled = true;
return 0;
}
--
2.34.1
Powered by blists - more mailing lists