Message-ID: <20210107044401.19828-2-zhukeqian1@huawei.com>
Date: Thu, 7 Jan 2021 12:43:56 +0800
From: Keqian Zhu <zhukeqian1@...wei.com>
To: <linux-kernel@...r.kernel.org>,
<linux-arm-kernel@...ts.infradead.org>,
<iommu@...ts.linux-foundation.org>, <kvm@...r.kernel.org>,
<kvmarm@...ts.cs.columbia.edu>,
Alex Williamson <alex.williamson@...hat.com>,
Cornelia Huck <cohuck@...hat.com>,
Will Deacon <will@...nel.org>, "Marc Zyngier" <maz@...nel.org>,
Catalin Marinas <catalin.marinas@....com>
CC: Mark Rutland <mark.rutland@....com>,
James Morse <james.morse@....com>,
Robin Murphy <robin.murphy@....com>,
Joerg Roedel <joro@...tes.org>,
"Daniel Lezcano" <daniel.lezcano@...aro.org>,
Thomas Gleixner <tglx@...utronix.de>,
Suzuki K Poulose <suzuki.poulose@....com>,
Julien Thierry <julien.thierry.kdev@...il.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Alexios Zavras <alexios.zavras@...el.com>,
<wanghaibin.wang@...wei.com>, <jiangkunkun@...wei.com>
Subject: [PATCH 1/6] vfio/iommu_type1: Make an explicit "promote" semantic
When we want to promote the pinned_page_dirty_scope of a vfio_iommu,
we call the "update" function to walk all vfio_groups, but when we
want to downgrade it, we simply set the flag to false directly. So
it is clearer to give the "update" function an explicit "promote"
semantic. Besides, if the vfio_iommu has already been promoted,
return early.
Signed-off-by: Keqian Zhu <zhukeqian1@...wei.com>
---
drivers/vfio/vfio_iommu_type1.c | 30 ++++++++++++++----------------
1 file changed, 14 insertions(+), 16 deletions(-)
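[Note, not part of the patch: a minimal, self-contained sketch of the
"promote" semantic described above, using hypothetical demo_* structures
instead of the real vfio_iommu/vfio_domain/vfio_group lists and locking;
the authoritative code is the promote_pinned_page_dirty_scope() helper in
the diff below.]

	/*
	 * Illustrative sketch only: models "promote" as a one-way
	 * transition with an early return when already promoted.
	 */
	#include <stdbool.h>
	#include <stddef.h>

	struct demo_group {
		bool pinned_page_dirty_scope;
		struct demo_group *next;
	};

	struct demo_iommu {
		bool pinned_page_dirty_scope;
		struct demo_group *groups;	/* simplified flat group list */
	};

	/* Promote iommu-wide scope only if every group has pinned-page scope. */
	void demo_promote_pinned_page_dirty_scope(struct demo_iommu *iommu)
	{
		struct demo_group *g;

		/* Already promoted: return early, as this patch adds. */
		if (iommu->pinned_page_dirty_scope)
			return;

		for (g = iommu->groups; g != NULL; g = g->next) {
			/* Any non-pinned-scope group blocks promotion; never demote here. */
			if (!g->pinned_page_dirty_scope)
				return;
		}

		iommu->pinned_page_dirty_scope = true;
	}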
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 0b4dedaa9128..334a8240e1da 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -148,7 +148,7 @@ static int put_pfn(unsigned long pfn, int prot);
static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
struct iommu_group *iommu_group);
-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu);
+static void promote_pinned_page_dirty_scope(struct vfio_iommu *iommu);
/*
* This code handles mapping and unmapping of user data buffers
* into DMA'ble space using the IOMMU
@@ -714,7 +714,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
group = vfio_iommu_find_iommu_group(iommu, iommu_group);
if (!group->pinned_page_dirty_scope) {
group->pinned_page_dirty_scope = true;
- update_pinned_page_dirty_scope(iommu);
+ promote_pinned_page_dirty_scope(iommu);
}
goto pin_done;
@@ -1622,27 +1622,26 @@ static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
return group;
}
-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu)
+static void promote_pinned_page_dirty_scope(struct vfio_iommu *iommu)
{
struct vfio_domain *domain;
struct vfio_group *group;
+ if (iommu->pinned_page_dirty_scope)
+ return;
+
list_for_each_entry(domain, &iommu->domain_list, next) {
list_for_each_entry(group, &domain->group_list, next) {
- if (!group->pinned_page_dirty_scope) {
- iommu->pinned_page_dirty_scope = false;
+ if (!group->pinned_page_dirty_scope)
return;
- }
}
}
if (iommu->external_domain) {
domain = iommu->external_domain;
list_for_each_entry(group, &domain->group_list, next) {
- if (!group->pinned_page_dirty_scope) {
- iommu->pinned_page_dirty_scope = false;
+ if (!group->pinned_page_dirty_scope)
return;
- }
}
}
@@ -2057,8 +2056,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
* addition of a dirty tracking group.
*/
group->pinned_page_dirty_scope = true;
- if (!iommu->pinned_page_dirty_scope)
- update_pinned_page_dirty_scope(iommu);
+ promote_pinned_page_dirty_scope(iommu);
mutex_unlock(&iommu->lock);
return 0;
@@ -2341,7 +2339,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
struct vfio_iommu *iommu = iommu_data;
struct vfio_domain *domain;
struct vfio_group *group;
- bool update_dirty_scope = false;
+ bool promote_dirty_scope = false;
LIST_HEAD(iova_copy);
mutex_lock(&iommu->lock);
@@ -2349,7 +2347,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (iommu->external_domain) {
group = find_iommu_group(iommu->external_domain, iommu_group);
if (group) {
- update_dirty_scope = !group->pinned_page_dirty_scope;
+ promote_dirty_scope = !group->pinned_page_dirty_scope;
list_del(&group->next);
kfree(group);
@@ -2379,7 +2377,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
continue;
vfio_iommu_detach_group(domain, group);
- update_dirty_scope = !group->pinned_page_dirty_scope;
+ promote_dirty_scope = !group->pinned_page_dirty_scope;
list_del(&group->next);
kfree(group);
/*
@@ -2415,8 +2413,8 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
* Removal of a group without dirty tracking may allow the iommu scope
* to be promoted.
*/
- if (update_dirty_scope)
- update_pinned_page_dirty_scope(iommu);
+ if (promote_dirty_scope)
+ promote_pinned_page_dirty_scope(iommu);
mutex_unlock(&iommu->lock);
}
--
2.19.1