Message-Id: <20200520202317.1f7515649dd711b388e40d3f@linux-foundation.org>
Date: Wed, 20 May 2020 20:23:17 -0700
From: Andrew Morton <akpm@...ux-foundation.org>
To: Michel Lespinasse <walken@...gle.com>
Cc: linux-mm <linux-mm@...ck.org>, LKML <linux-kernel@...r.kernel.org>,
Peter Zijlstra <peterz@...radead.org>,
Laurent Dufour <ldufour@...ux.ibm.com>,
Vlastimil Babka <vbabka@...e.cz>,
Matthew Wilcox <willy@...radead.org>,
Liam Howlett <Liam.Howlett@...cle.com>,
Jerome Glisse <jglisse@...hat.com>,
Davidlohr Bueso <dave@...olabs.net>,
David Rientjes <rientjes@...gle.com>,
Hugh Dickins <hughd@...gle.com>, Ying Han <yinghan@...gle.com>,
Jason Gunthorpe <jgg@...pe.ca>,
Daniel Jordan <daniel.m.jordan@...cle.com>,
John Hubbard <jhubbard@...dia.com>
Subject: Re: [PATCH v6 05/12] mmap locking API: convert mmap_sem call sites
missed by coccinelle
On Tue, 19 May 2020 22:29:01 -0700 Michel Lespinasse <walken@...gle.com> wrote:
> Convert the last few remaining mmap_sem rwsem calls to use the new
> mmap locking API. These were missed by coccinelle for some reason
> (I think coccinelle does not support some of the preprocessor
> constructs in these files?)
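
For context, the replacement calls are thin inline wrappers around the same
rwsem, so each of these conversions is mechanical.  A minimal sketch of the
read-side wrappers, along the lines of what this series adds in
include/linux/mmap_lock.h (paraphrased, not quoted verbatim from the series):

#include <linux/mm_types.h>
#include <linux/rwsem.h>

static inline void mmap_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_sem);
}

Call sites pass the mm_struct directly, which is what makes the
s/down_read(&mm->mmap_sem)/mmap_read_lock(mm)/ substitution below purely
textual.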
From: Andrew Morton <akpm@...ux-foundation.org>
Subject: mmap-locking-api-convert-mmap_sem-call-sites-missed-by-coccinelle-fix
convert linux-next leftovers
Cc: Michel Lespinasse <walken@...gle.com>
Cc: Daniel Jordan <daniel.m.jordan@...cle.com>
Cc: Laurent Dufour <ldufour@...ux.ibm.com>
Cc: Vlastimil Babka <vbabka@...e.cz>
Cc: Davidlohr Bueso <dbueso@...e.de>
Cc: David Rientjes <rientjes@...gle.com>
Cc: Hugh Dickins <hughd@...gle.com>
Cc: Jason Gunthorpe <jgg@...pe.ca>
Cc: Jerome Glisse <jglisse@...hat.com>
Cc: John Hubbard <jhubbard@...dia.com>
Cc: Liam Howlett <Liam.Howlett@...cle.com>
Cc: Matthew Wilcox <willy@...radead.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Ying Han <yinghan@...gle.com>
Signed-off-by: Andrew Morton <akpm@...ux-foundation.org>
---
 arch/arm64/kvm/mmu.c |   14 +++++++-------
 lib/test_hmm.c       |   14 +++++++-------
2 files changed, 14 insertions(+), 14 deletions(-)
--- a/lib/test_hmm.c~mmap-locking-api-convert-mmap_sem-call-sites-missed-by-coccinelle-fix
+++ a/lib/test_hmm.c
@@ -243,9 +243,9 @@ static int dmirror_range_fault(struct dm
}
range->notifier_seq = mmu_interval_read_begin(range->notifier);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ret = hmm_range_fault(range);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (ret) {
if (ret == -EBUSY)
continue;
@@ -684,7 +684,7 @@ static int dmirror_migrate(struct dmirro
if (!mmget_not_zero(mm))
return -EINVAL;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (addr = start; addr < end; addr = next) {
vma = find_vma(mm, addr);
if (!vma || addr < vma->vm_start ||
@@ -711,7 +711,7 @@ static int dmirror_migrate(struct dmirro
dmirror_migrate_finalize_and_map(&args, dmirror);
migrate_vma_finalize(&args);
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
/* Return the migrated data for verification. */
@@ -731,7 +731,7 @@ static int dmirror_migrate(struct dmirro
return ret;
out:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
return ret;
}
@@ -823,9 +823,9 @@ static int dmirror_range_snapshot(struct
range->notifier_seq = mmu_interval_read_begin(range->notifier);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ret = hmm_range_fault(range);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (ret) {
if (ret == -EBUSY)
continue;
--- a/arch/arm64/kvm/mmu.c~mmap-locking-api-convert-mmap_sem-call-sites-missed-by-coccinelle-fix
+++ a/arch/arm64/kvm/mmu.c
@@ -1084,7 +1084,7 @@ void stage2_unmap_vm(struct kvm *kvm)
int idx;
idx = srcu_read_lock(&kvm->srcu);
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
spin_lock(&kvm->mmu_lock);
slots = kvm_memslots(kvm);
@@ -1092,7 +1092,7 @@ void stage2_unmap_vm(struct kvm *kvm)
stage2_unmap_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
srcu_read_unlock(&kvm->srcu, idx);
}
@@ -1848,11 +1848,11 @@ static int user_mem_abort(struct kvm_vcp
}
/* Let's check if we will get back a huge page backed by hugetlbfs */
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma_intersection(current->mm, hva, hva + 1);
if (unlikely(!vma)) {
kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return -EFAULT;
}
@@ -1879,7 +1879,7 @@ static int user_mem_abort(struct kvm_vcp
if (vma_pagesize == PMD_SIZE ||
(vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
/* We need minimum second+third level pages */
ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
@@ -2456,7 +2456,7 @@ int kvm_arch_prepare_memory_region(struc
(kvm_phys_size(kvm) >> PAGE_SHIFT))
return -EFAULT;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
/*
* A memory region could potentially cover multiple VMAs, and any holes
* between them, so iterate over all of them to find out if we can map
@@ -2515,7 +2515,7 @@ int kvm_arch_prepare_memory_region(struc
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
out:
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return ret;
}
_
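
The lib/test_hmm.c hunks above all share one retry shape: sample the mmu
interval notifier sequence, fault under the mmap read lock, and retry on
-EBUSY.  A stripped-down sketch of that pattern using the new API (the
function name is illustrative; the real dmirror_range_fault() additionally
handles timeouts and device-private state):

#include <linux/hmm.h>
#include <linux/mmap_lock.h>

/* Illustrative only: mirrors the loop shape in dmirror_range_fault(). */
static int range_fault_sketch(struct mm_struct *mm, struct hmm_range *range)
{
	int ret;

	do {
		/* Sample the invalidation sequence before faulting. */
		range->notifier_seq = mmu_interval_read_begin(range->notifier);

		/* hmm_range_fault() requires the mmap lock held for read. */
		mmap_read_lock(mm);
		ret = hmm_range_fault(range);
		mmap_read_unlock(mm);

		/* -EBUSY means the range was invalidated under us; retry. */
	} while (ret == -EBUSY);

	return ret;
}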