Message-Id: <1481661034-3088-15-git-send-email-eric.auger@redhat.com>
Date: Tue, 13 Dec 2016 20:30:32 +0000
From: Eric Auger <eric.auger@...hat.com>
To: eric.auger@...hat.com, eric.auger.pro@...il.com,
christoffer.dall@...aro.org, marc.zyngier@....com,
robin.murphy@....com, alex.williamson@...hat.com,
will.deacon@....com, joro@...tes.org, tglx@...utronix.de,
jason@...edaemon.net, linux-arm-kernel@...ts.infradead.org
Cc: kvm@...r.kernel.org, drjones@...hat.com,
linux-kernel@...r.kernel.org, pranav.sawargaonkar@...il.com,
iommu@...ts.linux-foundation.org, punit.agrawal@....com,
diana.craciun@....com, gpkulkarni@...il.com,
shankerd@...eaurora.org, bharat.bhushan@....com
Subject: [RFC v4 14/16] vfio/type1: Allow transparent MSI IOVA allocation

When attaching a group to the container, check the group's
reserved regions and test whether the IOMMU translates MSI
transactions. If it does, initialize an IOVA allocator through
the iommu_get_msi_cookie() API. This allows MSI IOVAs to be
allocated transparently when the MSI controller's compose()
callback builds the MSI message.

Signed-off-by: Eric Auger <eric.auger@...hat.com>
---
v3 -> v4:
- test region's type: IOMMU_RESV_MSI
- restructure the code to prepare for safety assessment
- reword title
---
drivers/vfio/vfio_iommu_type1.c | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+)
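
For context, the consumer side of the cookie is not in this patch:
once iommu_get_msi_cookie() has installed the MSI IOVA allocator on
the domain, the doorbell remapping happens in the MSI controller's
compose() callback, via iommu_dma_map_msi_msg(). Below is a minimal
sketch of such a callback, modeled on the GICv3 ITS compose path;
foo_compose_msi_msg() and FOO_DOORBELL_PA are made-up names for
illustration, not kernel symbols.

#include <linux/dma-iommu.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/msi.h>

/* Made-up doorbell physical address, for illustration only. */
#define FOO_DOORBELL_PA	0x08020040UL

static void foo_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	/* Compose the message with the doorbell's physical address. */
	msg->address_hi = upper_32_bits(FOO_DOORBELL_PA);
	msg->address_lo = lower_32_bits(FOO_DOORBELL_PA);
	msg->data = d->hwirq;

	/*
	 * If the device's IOMMU domain carries an MSI cookie, this
	 * allocates an IOVA from the reserved MSI window, maps the
	 * doorbell page there and rewrites msg->address_hi/lo with
	 * the IOVA; without a cookie the message is left untouched.
	 */
	iommu_dma_map_msi_msg(d->irq, msg);
}

This is what makes the allocation transparent to userspace: VFIO
does not need a user-provided IOVA mapping for the MSI doorbell.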

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 2ba1942..d07fe73 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -36,6 +36,7 @@
 #include <linux/uaccess.h>
 #include <linux/vfio.h>
 #include <linux/workqueue.h>
+#include <linux/dma-iommu.h>
 
 #define DRIVER_VERSION	"0.2"
 #define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@...hat.com>"
@@ -734,6 +735,28 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain)
 	__free_pages(pages, order);
 }
 
+static bool vfio_iommu_has_resv_msi(struct iommu_group *group,
+				    phys_addr_t *base)
+{
+	struct list_head group_resv_regions;
+	struct iommu_resv_region *region, *next;
+	bool ret = false;
+
+	INIT_LIST_HEAD(&group_resv_regions);
+	iommu_get_group_resv_regions(group, &group_resv_regions);
+	list_for_each_entry(region, &group_resv_regions, list) {
+		if (region->type & IOMMU_RESV_MSI) {
+			*base = region->start;
+			ret = true;
+			goto out;
+		}
+	}
+out:
+	list_for_each_entry_safe(region, next, &group_resv_regions, list)
+		kfree(region);
+	return ret;
+}
+
 static int vfio_iommu_type1_attach_group(void *iommu_data,
 					 struct iommu_group *iommu_group)
 {
@@ -742,6 +765,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	struct vfio_domain *domain, *d;
 	struct bus_type *bus = NULL;
 	int ret;
+	bool resv_msi;
+	phys_addr_t resv_msi_base;
 
 	mutex_lock(&iommu->lock);
 
@@ -788,6 +813,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	if (ret)
 		goto out_domain;
 
+	resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base);
+
 	INIT_LIST_HEAD(&domain->group_list);
 	list_add(&group->next, &domain->group_list);
 
@@ -834,6 +861,9 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	if (ret)
 		goto out_detach;
 
+	if (resv_msi && iommu_get_msi_cookie(domain->domain, resv_msi_base))
+		goto out_detach;
+
 	list_add(&domain->next, &iommu->domain_list);
 
 	mutex_unlock(&iommu->lock);
--
1.9.1