Message-Id: <20220106004656.126790-7-daniel.m.jordan@oracle.com>
Date: Wed, 5 Jan 2022 19:46:46 -0500
From: Daniel Jordan <daniel.m.jordan@...cle.com>
To: Alexander Duyck <alexanderduyck@...com>,
Alex Williamson <alex.williamson@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Ben Segall <bsegall@...gle.com>,
Cornelia Huck <cohuck@...hat.com>,
Dan Williams <dan.j.williams@...el.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Herbert Xu <herbert@...dor.apana.org.au>,
Ingo Molnar <mingo@...hat.com>,
Jason Gunthorpe <jgg@...dia.com>,
Johannes Weiner <hannes@...xchg.org>,
Josh Triplett <josh@...htriplett.org>,
Michal Hocko <mhocko@...e.com>, Nico Pache <npache@...hat.com>,
Pasha Tatashin <pasha.tatashin@...een.com>,
Peter Zijlstra <peterz@...radead.org>,
Steffen Klassert <steffen.klassert@...unet.com>,
Steve Sistare <steven.sistare@...cle.com>,
Tejun Heo <tj@...nel.org>,
Tim Chen <tim.c.chen@...ux.intel.com>,
Vincent Guittot <vincent.guittot@...aro.org>
Cc: linux-mm@...ck.org, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-crypto@...r.kernel.org,
Daniel Jordan <daniel.m.jordan@...cle.com>
Subject: [RFC 06/16] vfio/type1: Refactor dma map removal

Do these small refactors to prepare for multithreaded page pinning:
* pass @iova and @end args to vfio_unmap_unpin()
* set iommu_mapped outside of vfio_unmap_unpin()
* split part of vfio_remove_dma() off into vfio_remove_dma_finish()
All three changes facilitate padata's undo callback, which needs to undo
only the parts of the job that each helper completed successfully.

Signed-off-by: Daniel Jordan <daniel.m.jordan@...cle.com>
---
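Not from this patch, but to illustrate where the helpers are headed: a
rough sketch of an undo callback for a partially completed pinning job.
The vfio_pin_map_dma_undo() name, its chunk-range arguments, and the way
padata would invoke it are assumptions about later patches in this
series; only vfio_unmap_unpin() and its new signature come from this
patch.

	/*
	 * Hypothetical sketch only.  A padata undo callback gets the
	 * chunk of the job that completed and unwinds just that much.
	 */
	static void vfio_pin_map_dma_undo(struct vfio_iommu *iommu,
					  struct vfio_dma *dma,
					  unsigned long start_vaddr,
					  unsigned long end_vaddr)
	{
		/* Translate the chunk's vaddr range to its iova range. */
		dma_addr_t iova = dma->iova + (start_vaddr - dma->vaddr);
		dma_addr_t end  = dma->iova + (end_vaddr - dma->vaddr);

		/* Unwind only the subrange this chunk actually mapped. */
		vfio_unmap_unpin(iommu, dma, iova, end, true);
	}

Moving the iommu_mapped update out to the callers serves the same goal,
presumably so that undoing a partial range does not mark the whole
vfio_dma as unmapped.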
drivers/vfio/vfio_iommu_type1.c | 23 +++++++++++++++--------
1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 26bb2d9b698b..8440e7e2c36d 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1078,16 +1078,16 @@ static size_t unmap_unpin_slow(struct vfio_domain *domain,
}

static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
+ dma_addr_t iova, dma_addr_t end,
bool do_accounting)
{
- dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
struct vfio_domain *domain, *d;
LIST_HEAD(unmapped_region_list);
struct iommu_iotlb_gather iotlb_gather;
int unmapped_region_cnt = 0;
long unlocked = 0;

- if (!dma->size)
+ if (iova == end)
return 0;

if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
@@ -1104,7 +1104,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
struct vfio_domain, next);

list_for_each_entry_continue(d, &iommu->domain_list, next) {
- iommu_unmap(d->domain, dma->iova, dma->size);
+ iommu_unmap(d->domain, iova, end - iova);
cond_resched();
}

@@ -1147,8 +1147,6 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
}
}

- dma->iommu_mapped = false;
-
if (unmapped_region_cnt) {
unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list,
&iotlb_gather);
@@ -1161,10 +1159,11 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
return unlocked;
}

-static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
+static void vfio_remove_dma_finish(struct vfio_iommu *iommu,
+ struct vfio_dma *dma)
{
WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list));
- vfio_unmap_unpin(iommu, dma, true);
+ dma->iommu_mapped = false;
vfio_unlink_dma(iommu, dma);
put_task_struct(dma->task);
vfio_dma_bitmap_free(dma);
@@ -1176,6 +1175,12 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
iommu->dma_avail++;
}

+static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
+{
+ vfio_unmap_unpin(iommu, dma, dma->iova, dma->iova + dma->size, true);
+ vfio_remove_dma_finish(iommu, dma);
+}
+
static void vfio_update_pgsize_bitmap(struct vfio_iommu *iommu)
{
struct vfio_domain *domain;
@@ -2466,7 +2471,9 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
long locked = 0, unlocked = 0;

dma = rb_entry(n, struct vfio_dma, node);
- unlocked += vfio_unmap_unpin(iommu, dma, false);
+ unlocked += vfio_unmap_unpin(iommu, dma, dma->iova,
+ dma->iova + dma->size, false);
+ dma->iommu_mapped = false;
p = rb_first(&dma->pfn_list);
for (; p; p = rb_next(p)) {
struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn,
--
2.34.1