Message-Id: <1515777968-867-5-git-send-email-ldufour@linux.vnet.ibm.com>
Date: Fri, 12 Jan 2018 18:25:48 +0100
From: Laurent Dufour <ldufour@...ux.vnet.ibm.com>
To: paulmck@...ux.vnet.ibm.com, peterz@...radead.org,
akpm@...ux-foundation.org, kirill@...temov.name,
ak@...ux.intel.com, mhocko@...nel.org, dave@...olabs.net,
jack@...e.cz, Matthew Wilcox <willy@...radead.org>,
benh@...nel.crashing.org, mpe@...erman.id.au, paulus@...ba.org,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, hpa@...or.com,
Will Deacon <will.deacon@....com>,
Sergey Senozhatsky <sergey.senozhatsky@...il.com>,
Andrea Arcangeli <aarcange@...hat.com>,
Alexei Starovoitov <alexei.starovoitov@...il.com>,
kemi.wang@...el.com, sergey.senozhatsky.work@...il.com
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
haren@...ux.vnet.ibm.com, khandual@...ux.vnet.ibm.com,
npiggin@...il.com, bsingharora@...il.com,
Tim Chen <tim.c.chen@...ux.intel.com>,
linuxppc-dev@...ts.ozlabs.org, x86@...nel.org
Subject: [PATCH v6 04/24] mm: Prepare for FAULT_FLAG_SPECULATIVE
From: Peter Zijlstra <peterz@...radead.org>
When speculating faults (without holding mmap_sem) we need to validate
that the VMA against which we loaded pages is still valid by the time
we are ready to install the new PTE.

Therefore, replace the pte_offset_map_lock() calls that (re)take the
PTL with pte_map_lock(), which can fail if the VMA has changed since
the fault started.
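As a rough sketch of the calling convention this establishes (not a
standalone buildable unit; pte_map_lock() is the helper added below and
simply wraps pte_offset_map_lock() for now, while example_fault_path()
is a hypothetical caller showing how a failure is meant to be handled
once later patches in the series allow the helper to fail):

	static bool pte_map_lock(struct vm_fault *vmf)
	{
		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
					       vmf->address, &vmf->ptl);
		return true;
	}

	/*
	 * Hypothetical caller: undo any partial state, then return
	 * VM_FAULT_RETRY so the fault can be retried the slow way,
	 * with mmap_sem held.
	 */
	static int example_fault_path(struct vm_fault *vmf)
	{
		if (!pte_map_lock(vmf))
			return VM_FAULT_RETRY;

		/* ... re-check the pte and install the new entry ... */

		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return 0;
	}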
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
[Port to 4.12 kernel]
[Remove the comment about the fault_env structure which has been
implemented as the vm_fault structure in the kernel]
Signed-off-by: Laurent Dufour <ldufour@...ux.vnet.ibm.com>
---
 include/linux/mm.h |  1 +
 mm/memory.c        | 56 ++++++++++++++++++++++++++++++++++++++----------------
 2 files changed, 41 insertions(+), 16 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 63f7ba111f64..ad299ed7b85c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -302,6 +302,7 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
 #define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
 #define FAULT_FLAG_INSTRUCTION  0x100	/* The fault was during an instruction fetch */
+#define FAULT_FLAG_SPECULATIVE	0x200	/* Speculative fault, not holding mmap_sem */
 
 #define FAULT_FLAG_TRACE \
 	{ FAULT_FLAG_WRITE,		"WRITE" }, \
diff --git a/mm/memory.c b/mm/memory.c
index 259f621345b2..868424ab850c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2438,6 +2438,13 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 }
 
+static bool pte_map_lock(struct vm_fault *vmf)
+{
+	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
+				       vmf->address, &vmf->ptl);
+	return true;
+}
+
 /*
  * Handle the case of a page which we actually need to copy to a new page.
  *
@@ -2465,6 +2472,7 @@ static int wp_page_copy(struct vm_fault *vmf)
 	const unsigned long mmun_start = vmf->address & PAGE_MASK;
 	const unsigned long mmun_end = mmun_start + PAGE_SIZE;
 	struct mem_cgroup *memcg;
+	int ret = VM_FAULT_OOM;
 
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
@@ -2492,7 +2500,11 @@ static int wp_page_copy(struct vm_fault *vmf)
 	/*
 	 * Re-check the pte - we dropped the lock
 	 */
-	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
+	if (!pte_map_lock(vmf)) {
+		mem_cgroup_cancel_charge(new_page, memcg, false);
+		ret = VM_FAULT_RETRY;
+		goto oom_free_new;
+	}
 	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
 		if (old_page) {
 			if (!PageAnon(old_page)) {
@@ -2584,7 +2596,7 @@ static int wp_page_copy(struct vm_fault *vmf)
 oom:
 	if (old_page)
 		put_page(old_page);
-	return VM_FAULT_OOM;
+	return ret;
 }
 
 /**
@@ -2605,8 +2617,8 @@ static int wp_page_copy(struct vm_fault *vmf)
 int finish_mkwrite_fault(struct vm_fault *vmf)
 {
 	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
-	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
-				       &vmf->ptl);
+	if (!pte_map_lock(vmf))
+		return VM_FAULT_RETRY;
 	/*
 	 * We might have raced with another page fault while we released the
 	 * pte_offset_map_lock.
@@ -2724,8 +2736,11 @@ static int do_wp_page(struct vm_fault *vmf)
 			get_page(vmf->page);
 			pte_unmap_unlock(vmf->pte, vmf->ptl);
 			lock_page(vmf->page);
-			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-					vmf->address, &vmf->ptl);
+			if (!pte_map_lock(vmf)) {
+				unlock_page(vmf->page);
+				put_page(vmf->page);
+				return VM_FAULT_RETRY;
+			}
 			if (!pte_same(*vmf->pte, vmf->orig_pte)) {
 				unlock_page(vmf->page);
 				pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -2953,8 +2968,10 @@ int do_swap_page(struct vm_fault *vmf)
 			 * Back out if somebody else faulted in this pte
 			 * while we released the pte lock.
 			 */
-			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-					vmf->address, &vmf->ptl);
+			if (!pte_map_lock(vmf)) {
+				delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+				return VM_FAULT_RETRY;
+			}
 			if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
 				ret = VM_FAULT_OOM;
 			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
@@ -3010,8 +3027,11 @@ int do_swap_page(struct vm_fault *vmf)
 	/*
 	 * Back out if somebody else already faulted in this pte.
 	 */
-	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
-			&vmf->ptl);
+	if (!pte_map_lock(vmf)) {
+		ret = VM_FAULT_RETRY;
+		mem_cgroup_cancel_charge(page, memcg, false);
+		goto out_page;
+	}
 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
 		goto out_nomap;
 
@@ -3140,8 +3160,8 @@ static int do_anonymous_page(struct vm_fault *vmf)
 			!mm_forbids_zeropage(vma->vm_mm)) {
 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
 						vma->vm_page_prot));
-		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-				vmf->address, &vmf->ptl);
+		if (!pte_map_lock(vmf))
+			return VM_FAULT_RETRY;
 		if (!pte_none(*vmf->pte))
 			goto unlock;
 		ret = check_stable_address_space(vma->vm_mm);
@@ -3176,8 +3196,11 @@ static int do_anonymous_page(struct vm_fault *vmf)
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
 
-	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
-			&vmf->ptl);
+	if (!pte_map_lock(vmf)) {
+		mem_cgroup_cancel_charge(page, memcg, false);
+		put_page(page);
+		return VM_FAULT_RETRY;
+	}
 	if (!pte_none(*vmf->pte))
 		goto release;
 
@@ -3301,8 +3324,9 @@ static int pte_alloc_one_map(struct vm_fault *vmf)
 	 * pte_none() under vmf->ptl protection when we return to
 	 * alloc_set_pte().
 	 */
-	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
-			&vmf->ptl);
+	if (!pte_map_lock(vmf))
+		return VM_FAULT_RETRY;
+
 	return 0;
 }
 
--
2.7.4