Message-ID: <alpine.DEB.2.20.1803271454390.41987@chino.kir.corp.google.com>
Date: Tue, 27 Mar 2018 14:57:31 -0700 (PDT)
From: David Rientjes <rientjes@...gle.com>
To: Laurent Dufour <ldufour@...ux.vnet.ibm.com>
cc: paulmck@...ux.vnet.ibm.com, peterz@...radead.org,
Andrew Morton <akpm@...ux-foundation.org>,
kirill@...temov.name, ak@...ux.intel.com, mhocko@...nel.org,
dave@...olabs.net, jack@...e.cz,
Matthew Wilcox <willy@...radead.org>, benh@...nel.crashing.org,
mpe@...erman.id.au, paulus@...ba.org,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, hpa@...or.com,
Will Deacon <will.deacon@....com>,
Sergey Senozhatsky <sergey.senozhatsky@...il.com>,
Andrea Arcangeli <aarcange@...hat.com>,
Alexei Starovoitov <alexei.starovoitov@...il.com>,
kemi.wang@...el.com, sergey.senozhatsky.work@...il.com,
Daniel Jordan <daniel.m.jordan@...cle.com>,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
haren@...ux.vnet.ibm.com, khandual@...ux.vnet.ibm.com,
npiggin@...il.com, bsingharora@...il.com,
Tim Chen <tim.c.chen@...ux.intel.com>,
linuxppc-dev@...ts.ozlabs.org, x86@...nel.org
Subject: Re: [PATCH v9 08/24] mm: Protect VMA modifications using VMA sequence
count
On Tue, 13 Mar 2018, Laurent Dufour wrote:
> diff --git a/mm/mmap.c b/mm/mmap.c
> index 5898255d0aeb..d6533cb85213 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -847,17 +847,18 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
> }
>
> if (start != vma->vm_start) {
> - vma->vm_start = start;
> + WRITE_ONCE(vma->vm_start, start);
> start_changed = true;
> }
> if (end != vma->vm_end) {
> - vma->vm_end = end;
> + WRITE_ONCE(vma->vm_end, end);
> end_changed = true;
> }
> - vma->vm_pgoff = pgoff;
> + WRITE_ONCE(vma->vm_pgoff, pgoff);
> if (adjust_next) {
> - next->vm_start += adjust_next << PAGE_SHIFT;
> - next->vm_pgoff += adjust_next;
> + WRITE_ONCE(next->vm_start,
> + next->vm_start + (adjust_next << PAGE_SHIFT));
> + WRITE_ONCE(next->vm_pgoff, next->vm_pgoff + adjust_next);
> }
>
> if (root) {
> @@ -1781,6 +1782,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
> out:
> perf_event_mmap(vma);
>
> + vm_write_begin(vma);
> vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
> if (vm_flags & VM_LOCKED) {
> if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
> @@ -1803,6 +1805,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
> vma->vm_flags |= VM_SOFTDIRTY;
>
> vma_set_page_prot(vma);
> + vm_write_end(vma);
>
> return addr;
>
Shouldn't this also protect vma->vm_flags?
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1796,7 +1796,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
vma == get_gate_vma(current->mm)))
mm->locked_vm += (len >> PAGE_SHIFT);
else
- vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
+ WRITE_ONCE(vma->vm_flags,
+ vma->vm_flags & VM_LOCKED_CLEAR_MASK);
}
if (file)
@@ -1809,7 +1810,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
* then new mapped in-place (which must be aimed as
* a completely new data area).
*/
- vma->vm_flags |= VM_SOFTDIRTY;
+ WRITE_ONCE(vma->vm_flags, vma->vm_flags | VM_SOFTDIRTY);
vma_set_page_prot(vma);
vm_write_end(vma);
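
For anyone following along, here is a minimal sketch of the pairing this
series relies on, i.e. why stores to fields the speculative fault path
reads are meant to sit inside vm_write_begin()/vm_write_end() and go
through WRITE_ONCE().  Illustrative only: it assumes the vm_sequence
seqcount added earlier in the series, and the sketch_* helpers below are
made up for the example, not the series' actual read side.

/*
 * Sketch, not from the patch: the vm_sequence seqcount lets a lockless
 * reader detect that a VMA writer raced with it and fall back to
 * taking mmap_sem.  vm_write_begin()/vm_write_end() in the series play
 * the role of the write side shown here.
 */
#include <linux/mm_types.h>
#include <linux/seqlock.h>

/* Write side: bump vm_sequence around the field update. */
static void sketch_vma_set_flags(struct vm_area_struct *vma,
				 unsigned long flags)
{
	write_seqcount_begin(&vma->vm_sequence);
	WRITE_ONCE(vma->vm_flags, flags);
	write_seqcount_end(&vma->vm_sequence);
}

/* Read side: snapshot the field, then check no writer intervened. */
static bool sketch_vma_get_flags(struct vm_area_struct *vma,
				 unsigned long *flags)
{
	unsigned int seq = raw_read_seqcount(&vma->vm_sequence);

	if (seq & 1)			/* a writer is in progress */
		return false;
	*flags = READ_ONCE(vma->vm_flags);
	/* false here means the caller must retry or take mmap_sem */
	return !read_seqcount_retry(&vma->vm_sequence, seq);
}

The WRITE_ONCE() annotations also keep each store a single, untorn write
for any reader doing a plain lockless READ_ONCE() of the field, which is
why the two vm_flags updates above should get the same treatment as the
vm_start/vm_end/vm_pgoff stores in __vma_adjust().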