Message-ID: <20210125090402.1429-5-lushenming@huawei.com>
Date: Mon, 25 Jan 2021 17:04:02 +0800
From: Shenming Lu <lushenming@...wei.com>
To: Alex Williamson <alex.williamson@...hat.com>,
Cornelia Huck <cohuck@...hat.com>, <kvm@...r.kernel.org>,
<linux-kernel@...r.kernel.org>
CC: Jean-Philippe Brucker <jean-philippe@...aro.org>,
Eric Auger <eric.auger@...hat.com>,
Lu Baolu <baolu.lu@...ux.intel.com>,
Kevin Tian <kevin.tian@...el.com>,
<wanghaibin.wang@...wei.com>, <yuzenghui@...wei.com>,
<lushenming@...wei.com>
Subject: [RFC PATCH v1 4/4] vfio: Allow pinning and mapping dynamically

If IOPF is enabled for the whole VFIO container, there is no need to
statically pin and map the entire DMA range; pages can be pinned and
mapped on demand when I/O page faults arrive. Conversely, when a DMA
mapping is removed, only unmap and unpin the pages recorded in the
IOPF mapped bitmap.

Signed-off-by: Shenming Lu <lushenming@...wei.com>
---
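[Reviewer note, not part of the commit: the fault-side counterpart of
this patch lives earlier in the series. The sketch below only
illustrates the intended flow -- pin one page, map it into all domains
of the container, set its bit in the IOPF mapped bitmap. It is not the
series' actual code: vfio_pin_page_one() is a hypothetical helper name
and locking is omitted.

static int vfio_iommu_handle_iopf(struct vfio_iommu *iommu,
                                  struct vfio_dma *dma, dma_addr_t iova)
{
        /* Bitmap index of the faulting page within this vfio_dma. */
        uint64_t bit = (iova - dma->iova) / PAGE_SIZE;
        struct vfio_domain *d;
        unsigned long pfn;
        int ret;

        if (IOMMU_MAPPED_BITMAP_GET(dma, bit))
                return 0;       /* already faulted in and mapped */

        /* Pin exactly one page of the userspace backing (hypothetical helper). */
        ret = vfio_pin_page_one(dma, iova, &pfn);
        if (ret)
                return ret;

        /* Map the page into every IOMMU domain of the container. */
        list_for_each_entry(d, &iommu->domain_list, next) {
                ret = iommu_map(d->domain, iova,
                                (phys_addr_t)pfn << PAGE_SHIFT,
                                PAGE_SIZE, d->prot);
                if (ret)
                        return ret;
        }

        bitmap_set(dma->iommu_mapped_bitmap, bit, 1);
        return 0;
}
]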
 drivers/vfio/vfio.c             | 20 +++++++++++
 drivers/vfio/vfio_iommu_type1.c | 61 ++++++++++++++++++++++++++++++++-
 include/linux/vfio.h            |  1 +
 3 files changed, 81 insertions(+), 1 deletion(-)
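[From the userspace side nothing changes: VFIO_IOMMU_MAP_DMA is issued
exactly as before, and with IOPF enabled the kernel merely records the
range (dma->size = size below) instead of pinning it up front. For
reference, a minimal mapping call, assuming container_fd is an already
initialized VFIO container:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int map_dma_range(int container_fd, void *vaddr, uint64_t iova,
                         uint64_t size)
{
        struct vfio_iommu_type1_dma_map map;

        memset(&map, 0, sizeof(map));
        map.argsz = sizeof(map);
        map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
        map.vaddr = (uintptr_t)vaddr;
        map.iova  = iova;
        map.size  = size;

        return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
}
]
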
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index fd885d99ee0f..466959f4d661 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -2404,6 +2404,26 @@ int vfio_iommu_dev_fault_handler(struct iommu_fault *fault, void *data)
 }
 EXPORT_SYMBOL_GPL(vfio_iommu_dev_fault_handler);
 
+/*
+ * Return 0 if the device has IOPF enabled, nonzero otherwise.
+ */
+int vfio_device_iopf_enabled(struct device *dev, void *data)
+{
+        struct vfio_device *device;
+        int ret = 0;
+
+        device = vfio_device_get_from_dev(dev);
+        if (!device)
+                return -ENODEV;
+
+        if (!device->iopf_enabled)
+                ret = 1;
+
+        vfio_device_put(device);
+        return ret;
+}
+EXPORT_SYMBOL_GPL(vfio_device_iopf_enabled);
+
 /**
  * Module/class support
  */
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index ac6f00c97897..da84155513e4 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -864,6 +864,43 @@ static size_t unmap_unpin_slow(struct vfio_domain *domain,
         return unmapped;
 }
 
+static long vfio_clear_iommu_mapped_bitmap(struct vfio_iommu *iommu,
+                                           struct vfio_dma *dma,
+                                           bool do_accounting)
+{
+        dma_addr_t iova = dma->iova;
+        size_t size = dma->size;
+        uint64_t i, npages = size / PAGE_SIZE;
+        long unlocked = 0;
+
+        for (i = 0; i < npages; i++, iova += PAGE_SIZE) {
+                if (IOMMU_MAPPED_BITMAP_GET(dma, i)) {
+                        struct vfio_domain *d;
+                        phys_addr_t phys;
+
+                        d = list_first_entry(&iommu->domain_list,
+                                             struct vfio_domain, next);
+                        phys = iommu_iova_to_phys(d->domain, iova);
+                        if (WARN_ON(!phys))
+                                continue;
+
+                        list_for_each_entry(d, &iommu->domain_list, next) {
+                                iommu_unmap(d->domain, iova, PAGE_SIZE);
+                                cond_resched();
+                        }
+                        vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
+                                                1, do_accounting);
+
+                        bitmap_clear(dma->iommu_mapped_bitmap, i, 1);
+                        unlocked++;
+                }
+        }
+
+        if (do_accounting)
+                return 0;
+        return unlocked;
+}
+
 static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
                              bool do_accounting)
 {
@@ -880,6 +917,10 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
         if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
                 return 0;
 
+        if (!dma->iommu_mapped)
+                return vfio_clear_iommu_mapped_bitmap(iommu, dma,
+                                                      do_accounting);
+
         /*
          * We use the IOMMU to track the physical addresses, otherwise we'd
          * need a much more complicated tracking system.  Unfortunately that
@@ -1302,6 +1343,23 @@ static bool vfio_iommu_iova_dma_valid(struct vfio_iommu *iommu,
         return list_empty(iova);
 }
 
+static bool vfio_iommu_iopf_enabled(struct vfio_iommu *iommu)
+{
+        struct vfio_domain *d;
+
+        list_for_each_entry(d, &iommu->domain_list, next) {
+                struct vfio_group *g;
+
+                list_for_each_entry(g, &d->group_list, next) {
+                        if (iommu_group_for_each_dev(g->iommu_group, NULL,
+                                                     vfio_device_iopf_enabled))
+                                return false;
+                }
+        }
+
+        return true;
+}
+
 static int vfio_dma_do_map(struct vfio_iommu *iommu,
                            struct vfio_iommu_type1_dma_map *map)
 {
@@ -1408,7 +1466,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
         vfio_link_dma(iommu, dma);
 
         /* Don't pin and map if container doesn't contain IOMMU capable domain*/
-        if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
+        if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu) ||
+            vfio_iommu_iopf_enabled(iommu))
                 dma->size = size;
         else
                 ret = vfio_pin_map_dma(iommu, dma, size);
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 6d535f029f21..cea1e9fd4bb4 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -157,6 +157,7 @@ struct kvm;
 
 extern void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);
 extern int vfio_iommu_dev_fault_handler(struct iommu_fault *fault, void *data);
+extern int vfio_device_iopf_enabled(struct device *dev, void *data);
 
 /*
  * Sub-module helpers
--
2.19.1