Message-Id: <1493308376-23851-4-git-send-email-ldufour@linux.vnet.ibm.com>
Date: Thu, 27 Apr 2017 17:52:42 +0200
From: Laurent Dufour <ldufour@...ux.vnet.ibm.com>
To: paulmck@...ux.vnet.ibm.com, peterz@...radead.org,
akpm@...ux-foundation.org, kirill@...temov.name,
ak@...ux.intel.com, mhocko@...nel.org, dave@...olabs.net,
jack@...e.cz
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
haren@...ux.vnet.ibm.com, khandual@...ux.vnet.ibm.com,
npiggin@...il.com, bsingharora@...il.com
Subject: [RFC v3 03/17] mm: Introduce pte_spinlock

This is needed because in handle_pte_fault() pte_offset_map() is
called first, and vmf->ptl is then fetched and spin-locked in a
separate step. That locking step was previously embedded in the call
to pte_offset_map_lock(); wrap it in a new pte_spinlock() helper.
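
For illustration only (a sketch, not part of the patch): a fault path
that already has the pte mapped takes the lock as below. The
VM_FAULT_RETRY branch is unreachable for now, since pte_spinlock()
always returns true, but the boolean return leaves room for a variant
that can fail to take the lock:

	/*
	 * Hypothetical caller sketch; the "example" name is made up and
	 * the pte is assumed to have been mapped earlier by
	 * pte_offset_map(), so only the page table lock is taken here.
	 */
	static int example_pte_update(struct vm_fault *vmf)
	{
		if (!pte_spinlock(vmf))
			return VM_FAULT_RETRY;
		if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
			/* Raced with a concurrent fault: drop map and lock. */
			pte_unmap_unlock(vmf->pte, vmf->ptl);
			return 0;
		}
		/* ... modify the pte under vmf->ptl ... */
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return 0;
	}
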
Signed-off-by: Laurent Dufour <ldufour@...ux.vnet.ibm.com>
---
mm/memory.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index bce32c9d73c2..441c0e3f3a0f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2100,6 +2100,13 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
	pte_unmap_unlock(vmf->pte, vmf->ptl);
}

+static bool pte_spinlock(struct vm_fault *vmf)
+{
+ vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
+ spin_lock(vmf->ptl);
+ return true;
+}
+
static bool pte_map_lock(struct vm_fault *vmf)
{
vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl);
@@ -3398,8 +3405,8 @@ static int do_numa_page(struct vm_fault *vmf)
* page table entry is not accessible, so there would be no
* concurrent hardware modifications to the PTE.
*/
- vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
- spin_lock(vmf->ptl);
+ if (!pte_spinlock(vmf))
+ return VM_FAULT_RETRY;
if (unlikely(!pte_same(*vmf->pte, pte))) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
goto out;
@@ -3566,8 +3573,8 @@ static int handle_pte_fault(struct vm_fault *vmf)
if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
		return do_numa_page(vmf);

-	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
- spin_lock(vmf->ptl);
+ if (!pte_spinlock(vmf))
+ return VM_FAULT_RETRY;
entry = vmf->orig_pte;
if (unlikely(!pte_same(*vmf->pte, entry)))
goto unlock;
--
2.7.4