Message-Id: <20230918-viommu-sync-map-v2-1-f33767f6cf7a@linux.ibm.com>
Date: Mon, 18 Sep 2023 13:51:43 +0200
From: Niklas Schnelle <schnelle@...ux.ibm.com>
To: Jean-Philippe Brucker <jean-philippe@...aro.org>,
Joerg Roedel <joro@...tes.org>, Will Deacon <will@...nel.org>,
Robin Murphy <robin.murphy@....com>
Cc: virtualization@...ts.linux-foundation.org, iommu@...ts.linux.dev,
linux-kernel@...r.kernel.org,
Niklas Schnelle <schnelle@...ux.ibm.com>
Subject: [PATCH v2 1/2] iommu/virtio: Make use of ops->iotlb_sync_map

Pull the sync operation out of viommu_map_pages() by implementing
ops->iotlb_sync_map. This allows the common IOMMU code to map multiple
elements of a scatterlist with a single sync (see iommu_map_sg()). It is
also a prerequisite for supporting IOMMU_CAP_DEFERRED_FLUSH.
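
To make the effect concrete, below is a simplified, self-contained C
sketch of the map-then-sync-once pattern the common code follows in
iommu_map_sg(). The sketch_* types and functions are hypothetical
stand-ins rather than kernel API, and the error unwinding the real code
performs on failure is omitted:

/*
 * Illustrative sketch only: map every scatterlist element first, then
 * invoke the driver's ->iotlb_sync_map callback once for the whole range.
 */
#include <stddef.h>

struct sketch_domain;

struct sketch_domain_ops {
        int (*map_pages)(struct sketch_domain *d, unsigned long iova,
                         unsigned long paddr, size_t size);
        int (*iotlb_sync_map)(struct sketch_domain *d, unsigned long iova,
                              size_t size);
};

struct sketch_domain {
        const struct sketch_domain_ops *ops;
};

struct sketch_sg_elem {
        unsigned long paddr;
        size_t length;
};

/* Map all elements contiguously at @iova, then issue a single sync. */
int sketch_map_sg(struct sketch_domain *d, unsigned long iova,
                  const struct sketch_sg_elem *sg, unsigned int nents)
{
        size_t mapped = 0;
        unsigned int i;
        int ret;

        for (i = 0; i < nents; i++) {
                /* With this patch the driver only queues the MAP request here. */
                ret = d->ops->map_pages(d, iova + mapped, sg[i].paddr,
                                        sg[i].length);
                if (ret)
                        return ret;
                mapped += sg[i].length;
        }

        /* One sync covers all of the requests queued above. */
        if (d->ops->iotlb_sync_map)
                return d->ops->iotlb_sync_map(d, iova, mapped);

        return 0;
}

With viommu_map_pages() now only queueing the MAP request, a scatterlist
of N elements triggers a single viommu_sync_req() from
viommu_iotlb_sync_map() instead of N synchronous round trips to the host.
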
Link: https://lore.kernel.org/lkml/20230726111433.1105665-1-schnelle@linux.ibm.com/
Signed-off-by: Niklas Schnelle <schnelle@...ux.ibm.com>
---
 drivers/iommu/virtio-iommu.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 17dcd826f5c2..3649586f0e5c 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -189,6 +189,12 @@ static int viommu_sync_req(struct viommu_dev *viommu)
         int ret;
         unsigned long flags;
 
+        /*
+         * .iotlb_sync_map and .flush_iotlb_all may be called before the viommu
+         * is initialized e.g. via iommu_create_device_direct_mappings()
+         */
+        if (!viommu)
+                return 0;
         spin_lock_irqsave(&viommu->request_lock, flags);
         ret = __viommu_sync_req(viommu);
         if (ret)
@@ -843,7 +849,7 @@ static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
                         .flags = cpu_to_le32(flags),
                 };
 
-                ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
+                ret = viommu_add_req(vdomain->viommu, &map, sizeof(map));
                 if (ret) {
                         viommu_del_mappings(vdomain, iova, end);
                         return ret;
@@ -912,6 +918,14 @@ static void viommu_iotlb_sync(struct iommu_domain *domain,
         viommu_sync_req(vdomain->viommu);
 }
 
+static int viommu_iotlb_sync_map(struct iommu_domain *domain,
+                                 unsigned long iova, size_t size)
+{
+        struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+        return viommu_sync_req(vdomain->viommu);
+}
+
 static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
 {
         struct iommu_resv_region *entry, *new_entry, *msi = NULL;
@@ -1058,6 +1072,7 @@ static struct iommu_ops viommu_ops = {
                 .unmap_pages = viommu_unmap_pages,
                 .iova_to_phys = viommu_iova_to_phys,
                 .iotlb_sync = viommu_iotlb_sync,
+                .iotlb_sync_map = viommu_iotlb_sync_map,
                 .free = viommu_domain_free,
         }
 };
--
2.39.2