Message-ID: <171026725393.8367.17497620074051138306.stgit@linux.ibm.com>
Date: Tue, 12 Mar 2024 13:14:20 -0500
From: Shivaprasad G Bhat <sbhat@...ux.ibm.com>
To: tpearson@...torengineering.com, alex.williamson@...hat.com,
linuxppc-dev@...ts.ozlabs.org
Cc: mpe@...erman.id.au, npiggin@...il.com, christophe.leroy@...roup.eu,
aneesh.kumar@...nel.org, naveen.n.rao@...ux.ibm.com,
gbatra@...ux.vnet.ibm.com, brking@...ux.vnet.ibm.com,
sbhat@...ux.ibm.com, aik@...abs.ru, jgg@...pe.ca, robh@...nel.org,
linux-kernel@...r.kernel.org, kvm@...r.kernel.org, aik@....com,
msuchanek@...e.de, jroedel@...e.de, vaibhav@...ux.ibm.com,
svaidy@...ux.ibm.com
Subject: [RFC PATCH 1/3] powerpc/pseries/iommu: Bring back userspace view for
single level TCE tables

Commit 090bad39b237a ("powerpc/powernv: Add indirect levels to
it_userspace"), which implemented indirect-level TCE support for
PowerNV, also removed the single-level support that previously
existed by default (the generic tce_iommu_userspace_view_alloc/free()
calls). On pSeries the TCE tables are single level, so the allocation
of the userspace view was lost along with the generic code. This
patch brings it back for pSeries on the refactored code base.
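
For context, the "userspace view" of a single-level table is just a
flat array parallel to the TCEs, one user address per IOMMU page,
indexed the same way as the TCEs themselves. A minimal toy
illustration follows (made-up types, not kernel code; it mirrors the
lookup that tce_useraddr_pSeriesLP() below performs on the real
struct iommu_table):

#include <stdint.h>
#include <stddef.h>

/* Toy single-level table: hardware TCEs plus a parallel userspace view. */
struct toy_tbl {
	unsigned long it_offset;   /* first IOMMU page number of the window */
	unsigned long it_size;     /* number of TCEs */
	uint64_t *it_userspace;    /* one user address per TCE, or NULL */
};

/* Same lookup the patch's tce_useraddr_pSeriesLP() performs. */
uint64_t *toy_useraddr(struct toy_tbl *tbl, unsigned long entry)
{
	if (!tbl->it_userspace)
		return NULL;
	return &tbl->it_userspace[entry - tbl->it_offset];
}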

On pSeries the windows/tables are "borrowed", so it_ops->free() is
not called on the container detach or TCE release paths because the
table is not actually freed. Therefore, decouple the userspace view
array allocation and free from the table's it_ops, just the way it
was before the commit above.
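
To make the intended lifecycle concrete, here is a small user-space
toy model (all names are made up for illustration, this is not kernel
code): the container owns the view array and frees it itself, while
the borrowed table's ->free() stays a no-op, mirroring the
tce_free_pSeries() stub added below.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct iommu_table with just what the sketch needs. */
struct toy_table {
	unsigned long it_size;            /* number of TCEs in the window */
	uint64_t *it_userspace;           /* the userspace view array */
	void (*free)(struct toy_table *); /* stand-in for it_ops->free() */
};

/* Borrowed window: freeing the table is not ours to do. */
static void borrowed_free(struct toy_table *tbl)
{
	/* Do nothing, like tce_free_pSeries() in the patch. */
}

static int view_alloc(struct toy_table *tbl)
{
	tbl->it_userspace = calloc(tbl->it_size, sizeof(uint64_t));
	return tbl->it_userspace ? 0 : -1;
}

static void view_free(struct toy_table *tbl)
{
	free(tbl->it_userspace);
	tbl->it_userspace = NULL;
}

int main(void)
{
	struct toy_table tbl = { .it_size = 256, .free = borrowed_free };

	if (view_alloc(&tbl))    /* done lazily on the first map */
		return 1;
	/* ... maps record user addresses in tbl.it_userspace ... */
	view_free(&tbl);         /* the container frees the view itself */
	tbl.free(&tbl);          /* even though ->free() is a no-op */
	puts("userspace view freed independently of the table ops");
	return 0;
}
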
Signed-off-by: Shivaprasad G Bhat <sbhat@...ux.ibm.com>
---
arch/powerpc/platforms/pseries/iommu.c | 19 ++++++++++--
drivers/vfio/vfio_iommu_spapr_tce.c | 51 ++++++++++++++++++++++++++++++++
2 files changed, 67 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index e8c4129697b1..40de8d55faef 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -143,7 +143,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
}
-static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
+static void tce_clear_pSeries(struct iommu_table *tbl, long index, long npages)
{
__be64 *tcep;
@@ -162,6 +162,11 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
return be64_to_cpu(*tcep);
}
+static void tce_free_pSeries(struct iommu_table *tbl)
+{
+ /* Do nothing. */
+}
+
static void tce_free_pSeriesLP(unsigned long liobn, long, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
@@ -576,7 +581,7 @@ struct iommu_table_ops iommu_table_lpar_multi_ops;
struct iommu_table_ops iommu_table_pseries_ops = {
.set = tce_build_pSeries,
- .clear = tce_free_pSeries,
+ .clear = tce_clear_pSeries,
.get = tce_get_pseries
};
@@ -685,15 +690,23 @@ static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned
return rc;
}
+
+static __be64 *tce_useraddr_pSeriesLP(struct iommu_table *tbl, long index,
+ bool __always_unused alloc)
+{
+ return tbl->it_userspace ? &tbl->it_userspace[index - tbl->it_offset] : NULL;
+}
#endif
struct iommu_table_ops iommu_table_lpar_multi_ops = {
.set = tce_buildmulti_pSeriesLP,
#ifdef CONFIG_IOMMU_API
.xchg_no_kill = tce_exchange_pseries,
+ .useraddrptr = tce_useraddr_pSeriesLP,
#endif
.clear = tce_freemulti_pSeriesLP,
- .get = tce_get_pSeriesLP
+ .get = tce_get_pSeriesLP,
+ .free = tce_free_pSeries
};
/*
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index a94ec6225d31..1cf36d687559 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -177,6 +177,50 @@ static long tce_iommu_register_pages(struct tce_container *container,
return ret;
}
+static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
+ struct mm_struct *mm)
+{
+ unsigned long cb = ALIGN(sizeof(tbl->it_userspace[0]) *
+ tbl->it_size, PAGE_SIZE);
+ unsigned long *uas;
+ long ret;
+
+ if (tbl->it_indirect_levels)
+ return 0;
+
+ WARN_ON(tbl->it_userspace);
+
+ ret = account_locked_vm(mm, cb >> PAGE_SHIFT, true);
+ if (ret)
+ return ret;
+
+ uas = vzalloc(cb);
+ if (!uas) {
+ account_locked_vm(mm, cb >> PAGE_SHIFT, false);
+ return -ENOMEM;
+ }
+ tbl->it_userspace = (__be64 *) uas;
+
+ return 0;
+}
+
+static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
+ struct mm_struct *mm)
+{
+ unsigned long cb = ALIGN(sizeof(tbl->it_userspace[0]) *
+ tbl->it_size, PAGE_SIZE);
+
+ if (!tbl->it_userspace)
+ return;
+
+ if (tbl->it_indirect_levels)
+ return;
+
+ vfree(tbl->it_userspace);
+ tbl->it_userspace = NULL;
+ account_locked_vm(mm, cb >> PAGE_SHIFT, false);
+}
+
static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
unsigned int it_page_shift)
{
@@ -554,6 +598,12 @@ static long tce_iommu_build_v2(struct tce_container *container,
unsigned long hpa;
enum dma_data_direction dirtmp;
+ if (!tbl->it_userspace) {
+ ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < pages; ++i) {
struct mm_iommu_table_group_mem_t *mem = NULL;
__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);
@@ -637,6 +687,7 @@ static void tce_iommu_free_table(struct tce_container *container,
{
unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
+ tce_iommu_userspace_view_free(tbl, container->mm);
iommu_tce_table_put(tbl);
account_locked_vm(container->mm, pages, false);
}
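
For reference, a hedged sketch of the userspace VFIO sequence that
reaches tce_iommu_build_v2() and thereby exercises the lazy
tce_iommu_userspace_view_alloc() added above. The group number,
buffer size and IOVA are illustrative, and group status checks,
device setup and error handling are trimmed for brevity:

#include <fcntl.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/vfio.h>

int main(void)
{
	int container = open("/dev/vfio/vfio", O_RDWR);
	int group = open("/dev/vfio/12", O_RDWR); /* group number illustrative */
	size_t sz = 1 << 21;                      /* 2MB, illustrative */
	void *buf;

	/* Attach the group first; VFIO_SET_IOMMU needs a group present. */
	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
	ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_v2_IOMMU);

	buf = mmap(NULL, sz, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* The v2 IOMMU requires memory to be preregistered before mapping. */
	struct vfio_iommu_spapr_register_memory reg = {
		.argsz = sizeof(reg),
		.vaddr = (uintptr_t)buf,
		.size = sz,
	};
	ioctl(container, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);

	/* The first map on a table triggers the lazy view allocation. */
	struct vfio_iommu_type1_dma_map map = {
		.argsz = sizeof(map),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = (uintptr_t)buf,
		.iova = 0,
		.size = sz,
	};
	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
	return 0;
}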