Message-Id: <1455264797-2334-4-git-send-email-eric.auger@linaro.org>
Date: Fri, 12 Feb 2016 08:13:05 +0000
From: Eric Auger <eric.auger@...aro.org>
To: eric.auger@...com, eric.auger@...aro.org,
alex.williamson@...hat.com, will.deacon@....com, joro@...tes.org,
tglx@...utronix.de, jason@...edaemon.net, marc.zyngier@....com,
christoffer.dall@...aro.org, linux-arm-kernel@...ts.infradead.org,
kvmarm@...ts.cs.columbia.edu, kvm@...r.kernel.org
Cc: suravee.suthikulpanit@....com, patches@...aro.org,
linux-kernel@...r.kernel.org, Manish.Jaggi@...iumnetworks.com,
Bharat.Bhushan@...escale.com, pranav.sawargaonkar@...il.com,
p.fedin@...sung.com, iommu@...ts.linux-foundation.org,
sherry.hurwitz@....com, brijesh.singh@....com, leo.duran@....com,
Thomas.Lendacky@....com
Subject: [RFC v3 03/15] vfio: introduce VFIO_IOVA_RESERVED vfio_dma type
We introduce a vfio_dma type so that legacy vfio_dma's can be
discriminated from the new reserved ones. Since the latter are not
mapped at registration time, the code paths that assume a mapping,
namely removal and replay, need to be adapted. For now those paths
simply bypass reserved entries; they will be reworked in subsequent
patches.

Signed-off-by: Eric Auger <eric.auger@...aro.org>
---
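Not part of this patch, just an illustration for reviewers: a later
patch in the series could create a reserved entry roughly along these
lines. vfio_register_reserved_iova() is a made-up name used only for
this sketch; vfio_link_dma() is the existing rb-tree insertion helper
in vfio_iommu_type1.c.

static int vfio_register_reserved_iova(struct vfio_iommu *iommu,
				       dma_addr_t iova, size_t size,
				       int prot)
{
	struct vfio_dma *dma;

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->iova = iova;
	dma->size = size;
	dma->prot = prot;
	/* no user vaddr, nothing pinned or mapped at this point */
	dma->type = VFIO_IOVA_RESERVED;

	/* tracked in the same rb-tree as user mappings */
	vfio_link_dma(iommu, dma);

	return 0;
}

With that in mind, vfio_remove_dma() only has to skip the
unpin/unmap step for such an entry, which is what the hunks below do.
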
drivers/vfio/vfio_iommu_type1.c | 17 ++++++++++++++++-
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index c5b57e1..b9326c9 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -53,6 +53,15 @@ module_param_named(disable_hugepages,
 MODULE_PARM_DESC(disable_hugepages,
 		 "Disable VFIO IOMMU support for IOMMU hugepages.");
 
+enum vfio_iova_type {
+	VFIO_IOVA_USER = 0, /* standard IOVA used to map user vaddr */
+	/*
+	 * IOVA reserved to map special host physical addresses,
+	 * MSI frames for instance
+	 */
+	VFIO_IOVA_RESERVED,
+};
+
 struct vfio_iommu {
 	struct list_head	domain_list;
 	struct mutex		lock;
@@ -75,6 +84,7 @@ struct vfio_dma {
 	unsigned long		vaddr;		/* Process virtual addr */
 	size_t			size;		/* Map size (bytes) */
 	int			prot;		/* IOMMU_READ/WRITE */
+	enum vfio_iova_type	type;		/* type of IOVA */
 };
 
 struct vfio_group {
@@ -418,7 +428,8 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
 
 static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
 {
-	vfio_unmap_unpin(iommu, dma);
+	if (likely(dma->type != VFIO_IOVA_RESERVED))
+		vfio_unmap_unpin(iommu, dma);
 	vfio_unlink_dma(iommu, dma);
 	kfree(dma);
 }
@@ -694,6 +705,10 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 		dma_addr_t iova;
 
 		dma = rb_entry(n, struct vfio_dma, node);
+
+		if (unlikely(dma->type == VFIO_IOVA_RESERVED))
+			continue;
+
 		iova = dma->iova;
 
 		while (iova < dma->iova + dma->size) {
--
1.9.1