Message-ID: <20210107092901.19712-2-zhukeqian1@huawei.com>
Date: Thu, 7 Jan 2021 17:28:57 +0800
From: Keqian Zhu <zhukeqian1@...wei.com>
To: <linux-kernel@...r.kernel.org>,
<linux-arm-kernel@...ts.infradead.org>,
<iommu@...ts.linux-foundation.org>, <kvm@...r.kernel.org>,
<kvmarm@...ts.cs.columbia.edu>,
Alex Williamson <alex.williamson@...hat.com>,
Kirti Wankhede <kwankhede@...dia.com>,
Cornelia Huck <cohuck@...hat.com>,
Will Deacon <will@...nel.org>, Marc Zyngier <maz@...nel.org>,
Catalin Marinas <catalin.marinas@....com>
CC: Mark Rutland <mark.rutland@....com>,
James Morse <james.morse@....com>,
Robin Murphy <robin.murphy@....com>,
Joerg Roedel <joro@...tes.org>,
"Daniel Lezcano" <daniel.lezcano@...aro.org>,
Thomas Gleixner <tglx@...utronix.de>,
Suzuki K Poulose <suzuki.poulose@....com>,
Julien Thierry <julien.thierry.kdev@...il.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Alexios Zavras <alexios.zavras@...el.com>,
<wanghaibin.wang@...wei.com>, <jiangkunkun@...wei.com>
Subject: [PATCH 1/5] vfio/iommu_type1: Fix vfio_dma_populate_bitmap to avoid losing dirty log

Deferring the check for whether a vfio_dma must be reported as fully
dirty until update_user_bitmap() makes it easy to lose dirty log. For
example, after the pinned scope of the vfio_iommu is promoted, a
vfio_dma is no longer considered fully dirty, so dirty log generated
before the promotion may be lost.
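
To illustrate, here is a simplified sketch of the pre-patch behaviour;
the check shown is the one this patch removes from update_user_bitmap():

	/*
	 * The fully-dirty fallback is only evaluated at report time:
	 *
	 *  1) dirty tracking starts while pinned_page_dirty_scope is
	 *     false, so DMA writes are not tracked per page;
	 *  2) pinned_page_dirty_scope of the vfio_iommu is promoted
	 *     to true;
	 *  3) userspace reads the bitmap: the condition below is now
	 *     false, so the writes from step 1 are never reported.
	 */
	if (!iommu->pinned_page_dirty_scope && dma->iommu_mapped)
		bitmap_set(dma->bitmap, 0, nbits);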

The key point is that pinned-dirty is not a real dirty tracking
mechanism: it cannot continuously track dirty pages, it only restricts
the dirty scope. In that respect it is essentially the same as
fully-dirty: fully-dirty covers the full scope, while pinned-dirty
covers only the pinned scope.

So we must mark pages pinned-dirty or fully-dirty right after we start
dirty tracking or clear the dirty bitmap, to ensure the dirty log is
recorded immediately.
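
With this change, the marking decision is made whenever the bitmap is
(re)populated; roughly, at the two call sites touched by the diff below:

	/* when dirty tracking starts (vfio_dma_bitmap_alloc_all) */
	vfio_dma_populate_bitmap(iommu, dma);

	/* after each dirty log read (vfio_iova_dirty_bitmap) */
	bitmap_clear(dma->bitmap, 0, dma->size >> pgshift);
	vfio_dma_populate_bitmap(iommu, dma);
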
Fixes: d6a4c185660c ("vfio iommu: Implementation of ioctl for dirty pages tracking")
Signed-off-by: Keqian Zhu <zhukeqian1@...wei.com>
---
drivers/vfio/vfio_iommu_type1.c | 33 ++++++++++++++++++++++-----------
1 file changed, 22 insertions(+), 11 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index bceda5e8baaa..b0a26e8e0adf 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -224,7 +224,7 @@ static void vfio_dma_bitmap_free(struct vfio_dma *dma)
 	dma->bitmap = NULL;
 }
 
-static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)
+static void vfio_dma_populate_bitmap_pinned(struct vfio_dma *dma, size_t pgsize)
 {
 	struct rb_node *p;
 	unsigned long pgshift = __ffs(pgsize);
@@ -236,6 +236,25 @@ static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)
 	}
 }
 
+static void vfio_dma_populate_bitmap_full(struct vfio_dma *dma, size_t pgsize)
+{
+	unsigned long pgshift = __ffs(pgsize);
+	unsigned long nbits = dma->size >> pgshift;
+
+	bitmap_set(dma->bitmap, 0, nbits);
+}
+
+static void vfio_dma_populate_bitmap(struct vfio_iommu *iommu,
+				     struct vfio_dma *dma)
+{
+	size_t pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
+
+	if (iommu->pinned_page_dirty_scope)
+		vfio_dma_populate_bitmap_pinned(dma, pgsize);
+	else if (dma->iommu_mapped)
+		vfio_dma_populate_bitmap_full(dma, pgsize);
+}
+
 static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu)
 {
 	struct rb_node *n;
@@ -257,7 +276,7 @@ static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu)
 			}
 			return ret;
 		}
-		vfio_dma_populate_bitmap(dma, pgsize);
+		vfio_dma_populate_bitmap(iommu, dma);
 	}
 	return 0;
 }
@@ -987,13 +1006,6 @@ static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
 	unsigned long shift = bit_offset % BITS_PER_LONG;
 	unsigned long leftover;
 
-	/*
-	 * mark all pages dirty if any IOMMU capable device is not able
-	 * to report dirty pages and all pages are pinned and mapped.
-	 */
-	if (!iommu->pinned_page_dirty_scope && dma->iommu_mapped)
-		bitmap_set(dma->bitmap, 0, nbits);
-
 	if (shift) {
 		bitmap_shift_left(dma->bitmap, dma->bitmap, shift,
 				  nbits + shift);
@@ -1019,7 +1031,6 @@ static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
 	struct vfio_dma *dma;
 	struct rb_node *n;
 	unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
-	size_t pgsize = (size_t)1 << pgshift;
 	int ret;
 
 	/*
@@ -1055,7 +1066,7 @@ static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
 		 * pages which are marked dirty by vfio_dma_rw()
 		 */
 		bitmap_clear(dma->bitmap, 0, dma->size >> pgshift);
-		vfio_dma_populate_bitmap(dma, pgsize);
+		vfio_dma_populate_bitmap(iommu, dma);
 	}
 	return 0;
 }
--
2.19.1