Message-Id: <20250924175438.7450-2-jacob.pan@linux.microsoft.com>
Date: Wed, 24 Sep 2025 10:54:37 -0700
From: Jacob Pan <jacob.pan@...ux.microsoft.com>
To: linux-kernel@...r.kernel.org,
"iommu@...ts.linux.dev" <iommu@...ts.linux.dev>,
Will Deacon <will@...nel.org>,
Jason Gunthorpe <jgg@...dia.com>,
Robin Murphy <robin.murphy@....com>,
Nicolin Chen <nicolinc@...dia.com>
Cc: Jacob Pan <jacob.pan@...ux.microsoft.com>,
Zhang Yu <zhangyu1@...ux.microsoft.com>,
Jean-Philippe Brucker <jean-philippe@...aro.org>,
Alexander Grest <Alexander.Grest@...rosoft.com>
Subject: [PATCH 1/2] iommu/arm-smmu-v3: Fix CMDQ timeout warning

While polling for n slots in the cmdq, the current code checks only
whether the queue is completely full. If the queue is almost full but
still lacks enough space (< n), the CMDQ timeout warning is never
triggered, even when polling has exceeded the timeout limit.

Poll for the exact amount of space required instead of for non-full,
and emit the timeout warning accordingly.
Fixes: 587e6c10a7ce ("iommu/arm-smmu-v3: Reduce contention during command-queue insertion")
Co-developed-by: Yu Zhang <zhangyu1@...ux.microsoft.com>
Signed-off-by: Yu Zhang <zhangyu1@...ux.microsoft.com>
Signed-off-by: Jacob Pan <jacob.pan@...ux.microsoft.com>
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 61 +++++++--------------
1 file changed, 21 insertions(+), 40 deletions(-)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 2a8b46b948f0..9b63525c13bb 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -138,12 +138,6 @@ static bool queue_has_space(struct arm_smmu_ll_queue *q, u32 n)
return space >= n;
}
-static bool queue_full(struct arm_smmu_ll_queue *q)
-{
- return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
- Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
-}
-
static bool queue_empty(struct arm_smmu_ll_queue *q)
{
return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
@@ -633,38 +627,6 @@ static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq,
__arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false);
}
-/* Wait for the command queue to become non-full */
-static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
- struct arm_smmu_cmdq *cmdq,
- struct arm_smmu_ll_queue *llq)
-{
- unsigned long flags;
- struct arm_smmu_queue_poll qp;
- int ret = 0;
-
- /*
- * Try to update our copy of cons by grabbing exclusive cmdq access. If
- * that fails, spin until somebody else updates it for us.
- */
- if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) {
- WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
- arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags);
- llq->val = READ_ONCE(cmdq->q.llq.val);
- return 0;
- }
-
- queue_poll_init(smmu, &qp);
- do {
- llq->val = READ_ONCE(cmdq->q.llq.val);
- if (!queue_full(llq))
- break;
-
- ret = queue_poll(&qp);
- } while (!ret);
-
- return ret;
-}
-
/*
* Wait until the SMMU signals a CMD_SYNC completion MSI.
* Must be called with the cmdq lock held in some capacity.
@@ -796,6 +758,7 @@ int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
unsigned long flags;
bool owner;
struct arm_smmu_ll_queue llq, head;
+ struct arm_smmu_queue_poll qp;
int ret = 0;
llq.max_n_shift = cmdq->q.llq.max_n_shift;
@@ -806,10 +769,28 @@ int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
do {
u64 old;
+ queue_poll_init(smmu, &qp);
while (!queue_has_space(&llq, n + sync)) {
local_irq_restore(flags);
- if (arm_smmu_cmdq_poll_until_not_full(smmu, cmdq, &llq))
- dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
+ /*
+ * Try to update our copy of cons by grabbing exclusive cmdq access. If
+ * that fails, spin until somebody else updates it for us.
+ */
+ if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) {
+ WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
+ arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags);
+ llq.val = READ_ONCE(cmdq->q.llq.val);
+ local_irq_save(flags);
+ continue;
+ }
+
+ ret = queue_poll(&qp);
+ if (ret == -ETIMEDOUT) {
+				dev_err_ratelimited(smmu->dev, "CPU %d CMDQ timeout, C: 0x%08x, CW: %x, P: 0x%08x, PW: %x, cmdq.lock: 0x%x\n",
+ smp_processor_id(), Q_IDX(&llq, llq.cons), Q_WRP(&llq, llq.cons), Q_IDX(&llq, llq.prod), Q_WRP(&llq, llq.prod), atomic_read(&cmdq->lock));
+ queue_poll_init(smmu, &qp);
+ }
+ llq.val = READ_ONCE(cmdq->q.llq.val);
local_irq_save(flags);
}
--
2.43.0