Message-Id: <20190521045242.24378-10-dave@stgolabs.net>
Date: Mon, 20 May 2019 21:52:37 -0700
From: Davidlohr Bueso <dave@...olabs.net>
To: linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc: akpm@...ux-foundation.org, willy@...radead.org, mhocko@...nel.org,
mgorman@...hsingularity.net, jglisse@...hat.com,
ldufour@...ux.vnet.ibm.com, dave@...olabs.net,
Davidlohr Bueso <dbueso@...e.de>
Subject: [PATCH 09/14] virt: teach the mm about range locking
The conversion is straightforward: mmap_sem is used within the
same function context most of the time. No change in semantics.
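For reference, the pattern applied throughout is sketched below,
modeled on the kvm_host_page_size() hunk in this patch;
mm_read_lock()/mm_read_unlock() and DEFINE_RANGE_LOCK_FULL() are the
range lock primitives introduced earlier in this series:

	/* before */
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (vma)
		size = vma_kernel_pagesize(vma);
	up_read(&current->mm->mmap_sem);

	/*
	 * after: a full range covers the entire address space, so the
	 * serialization semantics are identical to mmap_sem; the range
	 * cookie simply lives on the stack and is threaded through the
	 * lock and unlock calls.
	 */
	DEFINE_RANGE_LOCK_FULL(mmrange);

	mm_read_lock(current->mm, &mmrange);
	vma = find_vma(current->mm, addr);
	if (vma)
		size = vma_kernel_pagesize(vma);
	mm_read_unlock(current->mm, &mmrange);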
Signed-off-by: Davidlohr Bueso <dbueso@...e.de>
---
virt/kvm/arm/mmu.c | 17 ++++++++++-------
virt/kvm/async_pf.c | 4 ++--
virt/kvm/kvm_main.c | 11 ++++++-----
3 files changed, 18 insertions(+), 14 deletions(-)
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 74b6582eaa3c..85f8b9ccfabe 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -980,9 +980,10 @@ void stage2_unmap_vm(struct kvm *kvm)
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
int idx;
+ DEFINE_RANGE_LOCK_FULL(mmrange);
idx = srcu_read_lock(&kvm->srcu);
-	down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm, &mmrange);
spin_lock(&kvm->mmu_lock);
slots = kvm_memslots(kvm);
@@ -990,7 +991,7 @@ void stage2_unmap_vm(struct kvm *kvm)
stage2_unmap_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
-	up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm, &mmrange);
srcu_read_unlock(&kvm->srcu, idx);
}
@@ -1688,6 +1689,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_pfn_t pfn;
pgprot_t mem_type = PAGE_S2;
bool logging_active = memslot_is_logging(memslot);
+ DEFINE_RANGE_LOCK_FULL(mmrange);
unsigned long vma_pagesize, flags = 0;
write_fault = kvm_is_write_fault(vcpu);
@@ -1700,11 +1702,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
}
/* Let's check if we will get back a huge page backed by hugetlbfs */
-	down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm, &mmrange);
vma = find_vma_intersection(current->mm, hva, hva + 1);
if (unlikely(!vma)) {
kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
-		up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm, &mmrange);
return -EFAULT;
}
@@ -1725,7 +1727,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (vma_pagesize == PMD_SIZE ||
(vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
-	up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm, &mmrange);
/* We need minimum second+third level pages */
ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
@@ -2280,6 +2282,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
hva_t reg_end = hva + mem->memory_size;
bool writable = !(mem->flags & KVM_MEM_READONLY);
int ret = 0;
+ DEFINE_RANGE_LOCK_FULL(mmrange);
if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
change != KVM_MR_FLAGS_ONLY)
@@ -2293,7 +2296,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
(kvm_phys_size(kvm) >> PAGE_SHIFT))
return -EFAULT;
-	down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm, &mmrange);
/*
* A memory region could potentially cover multiple VMAs, and any holes
* between them, so iterate over all of them to find out if we can map
@@ -2361,7 +2364,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
out:
-	up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm, &mmrange);
return ret;
}
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index e93cd8515134..03d9f9bc5270 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -87,11 +87,11 @@ static void async_pf_execute(struct work_struct *work)
* mm and might be done in another context, so we must
* access remotely.
*/
- down_read(&mm->mmap_sem);
+ mm_read_lock(mm, &mmrange);
get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
&locked, &mmrange);
if (locked)
- up_read(&mm->mmap_sem);
+ mm_read_unlock(mm, &mmrange);
kvm_async_page_present_sync(vcpu, apf);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e1484150a3dd..421652e66a03 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1331,6 +1331,7 @@ EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
struct vm_area_struct *vma;
+ DEFINE_RANGE_LOCK_FULL(mmrange);
unsigned long addr, size;
size = PAGE_SIZE;
@@ -1339,7 +1340,7 @@ unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
if (kvm_is_error_hva(addr))
return PAGE_SIZE;
-	down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm, &mmrange);
vma = find_vma(current->mm, addr);
if (!vma)
goto out;
@@ -1347,7 +1348,7 @@ unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
size = vma_kernel_pagesize(vma);
out:
-	up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm, &mmrange);
return size;
}
@@ -1588,8 +1589,8 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
{
struct vm_area_struct *vma;
kvm_pfn_t pfn = 0;
- int npages, r;
DEFINE_RANGE_LOCK_FULL(mmrange);
+ int npages, r;
/* we can do it either atomically or asynchronously, not both */
BUG_ON(atomic && async);
@@ -1604,7 +1605,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
if (npages == 1)
return pfn;
-	down_read(&current->mm->mmap_sem);
+ mm_read_lock(current->mm, &mmrange);
if (npages == -EHWPOISON ||
(!async && check_user_page_hwpoison(addr))) {
pfn = KVM_PFN_ERR_HWPOISON;
@@ -1629,7 +1630,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
pfn = KVM_PFN_ERR_FAULT;
}
exit:
-	up_read(&current->mm->mmap_sem);
+ mm_read_unlock(current->mm, &mmrange);
return pfn;
}
--
2.16.4