Message-Id: <20251020224353.1408-2-jacob.pan@linux.microsoft.com>
Date: Mon, 20 Oct 2025 15:43:52 -0700
From: Jacob Pan <jacob.pan@linux.microsoft.com>
To: linux-kernel@vger.kernel.org,
"iommu@lists.linux.dev" <iommu@lists.linux.dev>,
Will Deacon <will@kernel.org>,
Joerg Roedel <joro@8bytes.org>,
Mostafa Saleh <smostafa@google.com>,
Jason Gunthorpe <jgg@nvidia.com>,
Robin Murphy <robin.murphy@arm.com>,
Nicolin Chen <nicolinc@nvidia.com>
Cc: Jacob Pan <jacob.pan@linux.microsoft.com>,
Zhang Yu <zhangyu1@linux.microsoft.com>,
Jean-Philippe Brucker <jean-philippe@linaro.org>,
Alexander Grest <Alexander.Grest@microsoft.com>
Subject: [PATCH v2 1/2] iommu/arm-smmu-v3: Fix CMDQ timeout warning

While waiting for space for n commands in the cmdq, the current code
polls on whether the queue is completely full rather than on whether
it has enough space. If the queue is nearly full but has fewer than
n free slots, the CMDQ timeout warning is never triggered, even when
the poll has exceeded the timeout limit.

Poll for the exact amount of space required instead of for the queue
becoming non-full, and emit the timeout warning accordingly.
Fixes: 587e6c10a7ce ("iommu/arm-smmu-v3: Reduce contention during command-queue insertion")
Co-developed-by: Yu Zhang <zhangyu1@linux.microsoft.com>
Signed-off-by: Yu Zhang <zhangyu1@linux.microsoft.com>
Signed-off-by: Jacob Pan <jacob.pan@linux.microsoft.com>
---
v2: - Reduced debug print info (Nicolin)
    - Use separate irq flags for the exclusive lock
    - Handle queue_poll() error codes other than -ETIMEDOUT
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 66 ++++++++-------------
1 file changed, 26 insertions(+), 40 deletions(-)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index bf67d9abc901..6959d99c74a3 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -117,12 +117,6 @@ static bool queue_has_space(struct arm_smmu_ll_queue *q, u32 n)
return space >= n;
}

-static bool queue_full(struct arm_smmu_ll_queue *q)
-{
- return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
- Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
-}
-
static bool queue_empty(struct arm_smmu_ll_queue *q)
{
return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
@@ -612,38 +606,6 @@ static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq,
__arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false);
}

-/* Wait for the command queue to become non-full */
-static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
- struct arm_smmu_cmdq *cmdq,
- struct arm_smmu_ll_queue *llq)
-{
- unsigned long flags;
- struct arm_smmu_queue_poll qp;
- int ret = 0;
-
- /*
- * Try to update our copy of cons by grabbing exclusive cmdq access. If
- * that fails, spin until somebody else updates it for us.
- */
- if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) {
- WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
- arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags);
- llq->val = READ_ONCE(cmdq->q.llq.val);
- return 0;
- }
-
- queue_poll_init(smmu, &qp);
- do {
- llq->val = READ_ONCE(cmdq->q.llq.val);
- if (!queue_full(llq))
- break;
-
- ret = queue_poll(&qp);
- } while (!ret);
-
- return ret;
-}
-
/*
* Wait until the SMMU signals a CMD_SYNC completion MSI.
* Must be called with the cmdq lock held in some capacity.
@@ -775,6 +737,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
unsigned long flags;
bool owner;
struct arm_smmu_ll_queue llq, head;
+ struct arm_smmu_queue_poll qp;
int ret = 0;

llq.max_n_shift = cmdq->q.llq.max_n_shift;
@@ -785,10 +748,33 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
do {
u64 old;

+ queue_poll_init(smmu, &qp);
while (!queue_has_space(&llq, n + sync)) {
+ unsigned long iflags;
+
local_irq_restore(flags);
- if (arm_smmu_cmdq_poll_until_not_full(smmu, cmdq, &llq))
- dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
+ /*
+ * Try to update our copy of cons by grabbing exclusive cmdq access. If
+ * that fails, spin until somebody else updates it for us.
+ */
+ if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, iflags)) {
+ WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
+ arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, iflags);
+ llq.val = READ_ONCE(cmdq->q.llq.val);
+ local_irq_save(flags);
+ continue;
+ }
+
+ ret = queue_poll(&qp);
+ if (ret == -ETIMEDOUT) {
+ dev_err_ratelimited(smmu->dev, "CPU %d CMDQ timeout, cons: 0x%08x, prod: 0x%08x, lock: 0x%x\n",
+ smp_processor_id(), llq.cons, llq.prod, atomic_read(&cmdq->lock));
+ queue_poll_init(smmu, &qp);
+ } else if (ret) {
+ dev_err_ratelimited(smmu->dev, "CPU %d CMDQ Poll error %d\n",
+ smp_processor_id(), ret);
+ }
+ llq.val = READ_ONCE(cmdq->q.llq.val);
local_irq_save(flags);
}

--
2.43.0