Message-ID: <20240905124956.84932-1-robdclark@gmail.com>
Date: Thu, 5 Sep 2024 05:49:56 -0700
From: Rob Clark <robdclark@...il.com>
To: iommu@...ts.linux.dev
Cc: linux-arm-msm@...r.kernel.org,
freedreno@...ts.freedesktop.org,
Ashish Mhetre <amhetre@...dia.com>,
Rob Clark <robdclark@...omium.org>,
Will Deacon <will@...nel.org>,
Robin Murphy <robin.murphy@....com>,
Joerg Roedel <joro@...tes.org>,
linux-arm-kernel@...ts.infradead.org (moderated list: ARM SMMU DRIVERS),
linux-kernel@...r.kernel.org (open list)
Subject: [PATCH] Revert "iommu/io-pgtable-arm: Optimise non-coherent unmap"
From: Rob Clark <robdclark@...omium.org>
This reverts commit 85b715a334583488ad7fbd3001fe6fd617b7d4c0.

It was causing GPU SMMU faults on x1e80100.

I _think_ what is causing this is the change in ordering of
__arm_lpae_clear_pte() (the dma_sync_single_for_device() on the
pgtable memory) relative to io_pgtable_tlb_flush_walk().  I'm not
entirely sure how the reverted patch is supposed to work correctly
in the face of other concurrent translations (to buffers unrelated
to the one being unmapped), because after the
io_pgtable_tlb_flush_walk() we can have stale data read back into
the TLB.
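
Concretely, the interleaving I have in mind (a sketch of my reading
of the optimised path being reverted, not a captured trace) is:

        /* CPU, in __arm_lpae_unmap(), inside the unmap loop: */
        io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
                                  ARM_LPAE_GRANULE(data));
        /*
         * <- window: a concurrent walk (for a buffer unrelated to
         *    the one being unmapped) can still read the
         *    not-yet-cleared leaf PTEs from memory and refill the
         *    TLB with stale translations
         */

        /* only after the loop, the batched clear + dma_sync: */
        __arm_lpae_clear_pte(ptep, &iop->cfg, i);

With the revert, each PTE is cleared (and, for a non-coherent walk,
synced) before any invalidation is issued for it, so there is
nothing stale left for the walker to pull back into the TLB.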
Signed-off-by: Rob Clark <robdclark@...omium.org>
---
drivers/iommu/io-pgtable-arm.c | 31 ++++++++++++++-----------------
1 file changed, 14 insertions(+), 17 deletions(-)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 16e51528772d..85261baa3a04 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -274,13 +274,13 @@ static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
 				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
 }
 
-static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg, int num_entries)
+static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
 {
-	for (int i = 0; i < num_entries; i++)
-		ptep[i] = 0;
-	if (!cfg->coherent_walk && num_entries)
-		__arm_lpae_sync_pte(ptep, num_entries, cfg);
+	*ptep = 0;
+
+	if (!cfg->coherent_walk)
+		__arm_lpae_sync_pte(ptep, 1, cfg);
 }
 
 static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
@@ -653,28 +653,25 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
 		num_entries = min_t(int, pgcount, max_entries);
 
-		/* Find and handle non-leaf entries */
-		for (i = 0; i < num_entries; i++) {
-			pte = READ_ONCE(ptep[i]);
+		while (i < num_entries) {
+			pte = READ_ONCE(*ptep);
 			if (WARN_ON(!pte))
 				break;
 
-			if (!iopte_leaf(pte, lvl, iop->fmt)) {
-				__arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1);
+			__arm_lpae_clear_pte(ptep, &iop->cfg);
 
+			if (!iopte_leaf(pte, lvl, iop->fmt)) {
 				/* Also flush any partial walks */
 				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
 							  ARM_LPAE_GRANULE(data));
 				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
+			} else if (!iommu_iotlb_gather_queued(gather)) {
+				io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
 			}
-		}
 
-		/* Clear the remaining entries */
-		__arm_lpae_clear_pte(ptep, &iop->cfg, i);
-
-		if (gather && !iommu_iotlb_gather_queued(gather))
-			for (int j = 0; j < i; j++)
-				io_pgtable_tlb_add_page(iop, gather, iova + j * size, size);
+			ptep++;
+			i++;
+		}
 
 		return i * size;
 	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
--
2.46.0