[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <107a23f9-dde0-8e60-6117-4c1f9f6e4e58@linux.ibm.com>
Date: Mon, 18 May 2020 15:23:10 +0200
From: Laurent Dufour <ldufour@...ux.ibm.com>
To: Michel Lespinasse <walken@...gle.com>,
Andrew Morton <akpm@...ux-foundation.org>,
linux-mm <linux-mm@...ck.org>
Cc: LKML <linux-kernel@...r.kernel.org>,
Peter Zijlstra <peterz@...radead.org>,
Vlastimil Babka <vbabka@...e.cz>,
Matthew Wilcox <willy@...radead.org>,
Liam Howlett <Liam.Howlett@...cle.com>,
Jerome Glisse <jglisse@...hat.com>,
Davidlohr Bueso <dave@...olabs.net>,
David Rientjes <rientjes@...gle.com>,
Hugh Dickins <hughd@...gle.com>, Ying Han <yinghan@...gle.com>,
Jason Gunthorpe <jgg@...pe.ca>,
Daniel Jordan <daniel.m.jordan@...cle.com>
Subject: Re: [PATCH v5 05/10] mmap locking API: convert mmap_sem call sites
missed by coccinelle
Le 22/04/2020 à 02:14, Michel Lespinasse a écrit :
> Convert the last few remaining mmap_sem rwsem calls to use the new
> mmap locking API. These were missed by coccinelle for some reason
> (I think coccinelle does not support some of the preprocessor
> constructs in these files ?)
>
> Signed-off-by: Michel Lespinasse <walken@...gle.com>
> Reviewed-by: Daniel Jordan <daniel.m.jordan@...cle.com>
Reviewed-by: Laurent Dufour <ldufour@...ux.ibm.com>
> ---
> arch/mips/mm/fault.c | 10 +++++-----
> arch/riscv/mm/pageattr.c | 4 ++--
> arch/x86/kvm/mmu/paging_tmpl.h | 8 ++++----
> drivers/android/binder_alloc.c | 4 ++--
> fs/proc/base.c | 6 +++---
> 5 files changed, 16 insertions(+), 16 deletions(-)
>
> diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
> index f8d62cd83b36..9ef2dd39111e 100644
> --- a/arch/mips/mm/fault.c
> +++ b/arch/mips/mm/fault.c
> @@ -97,7 +97,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
> if (user_mode(regs))
> flags |= FAULT_FLAG_USER;
> retry:
> - down_read(&mm->mmap_sem);
> + mmap_read_lock(mm);
> vma = find_vma(mm, address);
> if (!vma)
> goto bad_area;
> @@ -190,7 +190,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
> }
> }
>
> - up_read(&mm->mmap_sem);
> + mmap_read_unlock(mm);
> return;
>
> /*
> @@ -198,7 +198,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
> * Fix it, but check if it's kernel or user first..
> */
> bad_area:
> - up_read(&mm->mmap_sem);
> + mmap_read_unlock(mm);
>
> bad_area_nosemaphore:
> /* User mode accesses just cause a SIGSEGV */
> @@ -250,14 +250,14 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
> * We ran out of memory, call the OOM killer, and return the userspace
> * (which will retry the fault, or kill us if we got oom-killed).
> */
> - up_read(&mm->mmap_sem);
> + mmap_read_unlock(mm);
> if (!user_mode(regs))
> goto no_context;
> pagefault_out_of_memory();
> return;
>
> do_sigbus:
> - up_read(&mm->mmap_sem);
> + mmap_read_unlock(mm);
>
> /* Kernel mode? Handle exceptions or die */
> if (!user_mode(regs))
> diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
> index 728759eb530a..b9072c043222 100644
> --- a/arch/riscv/mm/pageattr.c
> +++ b/arch/riscv/mm/pageattr.c
> @@ -117,10 +117,10 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
> if (!numpages)
> return 0;
>
> - down_read(&init_mm.mmap_sem);
> + mmap_read_lock(&init_mm);
> ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
> &masks);
> - up_read(&init_mm.mmap_sem);
> + mmap_read_unlock(&init_mm);
>
> flush_tlb_kernel_range(start, end);
>
> diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
> index 9bdf9b7d9a96..40e5bb67cc09 100644
> --- a/arch/x86/kvm/mmu/paging_tmpl.h
> +++ b/arch/x86/kvm/mmu/paging_tmpl.h
> @@ -165,22 +165,22 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> unsigned long pfn;
> unsigned long paddr;
>
> - down_read(&current->mm->mmap_sem);
> + mmap_read_lock(current->mm);
> vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
> if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
> - up_read(&current->mm->mmap_sem);
> + mmap_read_unlock(current->mm);
> return -EFAULT;
> }
> pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
> paddr = pfn << PAGE_SHIFT;
> table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
> if (!table) {
> - up_read(&current->mm->mmap_sem);
> + mmap_read_unlock(current->mm);
> return -EFAULT;
> }
> ret = CMPXCHG(&table[index], orig_pte, new_pte);
> memunmap(table);
> - up_read(&current->mm->mmap_sem);
> + mmap_read_unlock(current->mm);
> }
>
> return (ret != orig_pte);
> diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
> index 5e063739a3a8..cbdc43ed0f9f 100644
> --- a/drivers/android/binder_alloc.c
> +++ b/drivers/android/binder_alloc.c
> @@ -932,7 +932,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
> mm = alloc->vma_vm_mm;
> if (!mmget_not_zero(mm))
> goto err_mmget;
> - if (!down_read_trylock(&mm->mmap_sem))
> + if (!mmap_read_trylock(mm))
> goto err_down_read_mmap_sem_failed;
> vma = binder_alloc_get_vma(alloc);
>
> @@ -946,7 +946,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
>
> trace_binder_unmap_user_end(alloc, index);
> }
> - up_read(&mm->mmap_sem);
> + mmap_read_unlock(mm);
> mmput(mm);
>
> trace_binder_unmap_kernel_start(alloc, index);
> diff --git a/fs/proc/base.c b/fs/proc/base.c
> index 8fff3c955530..ebb356434652 100644
> --- a/fs/proc/base.c
> +++ b/fs/proc/base.c
> @@ -2314,7 +2314,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
> if (!mm)
> goto out_put_task;
>
> - ret = down_read_killable(&mm->mmap_sem);
> + ret = mmap_read_lock_killable(mm);
> if (ret) {
> mmput(mm);
> goto out_put_task;
> @@ -2341,7 +2341,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
> p = genradix_ptr_alloc(&fa, nr_files++, GFP_KERNEL);
> if (!p) {
> ret = -ENOMEM;
> - up_read(&mm->mmap_sem);
> + mmap_read_unlock(mm);
> mmput(mm);
> goto out_put_task;
> }
> @@ -2350,7 +2350,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
> p->end = vma->vm_end;
> p->mode = vma->vm_file->f_mode;
> }
> - up_read(&mm->mmap_sem);
> + mmap_read_unlock(mm);
> mmput(mm);
>
> for (i = 0; i < nr_files; i++) {
>
Powered by blists - more mailing lists