[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20251105190638.23172-2-jacob.pan@linux.microsoft.com>
Date: Wed, 5 Nov 2025 11:06:37 -0800
From: Jacob Pan <jacob.pan@...ux.microsoft.com>
To: linux-kernel@...r.kernel.org,
"iommu@...ts.linux.dev" <iommu@...ts.linux.dev>,
Will Deacon <will@...nel.org>,
Joerg Roedel <joro@...tes.org>,
Mostafa Saleh <smostafa@...gle.com>,
Jason Gunthorpe <jgg@...dia.com>,
Robin Murphy <robin.murphy@....com>,
Nicolin Chen <nicolinc@...dia.com>
Cc: Jacob Pan <jacob.pan@...ux.microsoft.com>,
Zhang Yu <zhangyu1@...ux.microsoft.com>,
Jean Philippe-Brucker <jean-philippe@...aro.org>,
Alexander Grest <Alexander.Grest@...rosoft.com>
Subject: [PATCH v3 1/2] iommu/arm-smmu-v3: Fix CMDQ timeout warning
While polling for n spaces in the cmdq, the current code instead checks
whether the queue is completely full. If the queue is almost full but does
not have enough space for n commands, the CMDQ timeout warning is never
triggered even when the polling has exceeded the timeout limit.
The existing arm_smmu_cmdq_poll_until_not_full() is neither efficient nor
a good fit for its only caller, arm_smmu_cmdq_issue_cmdlist():
- It starts a new timer on every call, so the total wait is not bounded
by the preset ARM_SMMU_POLL_TIMEOUT_US per issued command list.
- It performs a redundant internal queue_full() check, which cannot detect
whether there is enough space for n commands.
This patch polls for the exact amount of space needed instead of checking
for a full queue, and emits the timeout warning accordingly.
Fixes: 587e6c10a7ce ("iommu/arm-smmu-v3: Reduce contention during command-queue insertion")
Co-developed-by: Yu Zhang <zhangyu1@...ux.microsoft.com>
Signed-off-by: Yu Zhang <zhangyu1@...ux.microsoft.com>
Signed-off-by: Jacob Pan <jacob.pan@...ux.microsoft.com>
---
v3:
- Use a helper for cmdq poll instead of open coding (Nicolin)
- Add more explanation in the commit message (Nicolin)
v2: - Reduced debug print info (Nicolin)
- Use a separate irq flags for exclusive lock
- Handle queue_poll error codes other than -ETIMEDOUT
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 43 +++++++++------------
1 file changed, 19 insertions(+), 24 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index bf67d9abc901..86be84c14036 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -117,12 +117,6 @@ static bool queue_has_space(struct arm_smmu_ll_queue *q, u32 n)
return space >= n;
}
-static bool queue_full(struct arm_smmu_ll_queue *q)
-{
- return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
- Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
-}
-
static bool queue_empty(struct arm_smmu_ll_queue *q)
{
return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
@@ -612,13 +606,13 @@ static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq,
__arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false);
}
-/* Wait for the command queue to become non-full */
-static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
- struct arm_smmu_cmdq *cmdq,
- struct arm_smmu_ll_queue *llq)
+
+static inline void arm_smmu_cmdq_poll(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq *cmdq,
+ struct arm_smmu_ll_queue *llq,
+ struct arm_smmu_queue_poll *qp)
{
unsigned long flags;
- struct arm_smmu_queue_poll qp;
int ret = 0;
/*
@@ -629,19 +623,19 @@ static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags);
llq->val = READ_ONCE(cmdq->q.llq.val);
- return 0;
+ return;
}
- queue_poll_init(smmu, &qp);
- do {
- llq->val = READ_ONCE(cmdq->q.llq.val);
- if (!queue_full(llq))
- break;
-
- ret = queue_poll(&qp);
- } while (!ret);
-
- return ret;
+ ret = queue_poll(qp);
+ if (ret == -ETIMEDOUT) {
+ dev_err_ratelimited(smmu->dev, "CMDQ timed out, cons: %08x, prod: 0x%08x\n",
+ llq->cons, llq->prod);
+ /* Restart the timer */
+ queue_poll_init(smmu, qp);
+ } else if (ret) {
+ dev_err_ratelimited(smmu->dev, "CMDQ poll error %d\n", ret);
+ }
+ llq->val = READ_ONCE(cmdq->q.llq.val);
}
/*
@@ -783,12 +777,13 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
local_irq_save(flags);
llq.val = READ_ONCE(cmdq->q.llq.val);
do {
+ struct arm_smmu_queue_poll qp;
u64 old;
+ queue_poll_init(smmu, &qp);
while (!queue_has_space(&llq, n + sync)) {
local_irq_restore(flags);
- if (arm_smmu_cmdq_poll_until_not_full(smmu, cmdq, &llq))
- dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
+ arm_smmu_cmdq_poll(smmu, cmdq, &llq, &qp);
local_irq_save(flags);
}
--
2.43.0
Powered by blists - more mailing lists