Message-Id: <1526555193-7242-17-git-send-email-ldufour@linux.vnet.ibm.com>
Date: Thu, 17 May 2018 13:06:23 +0200
From: Laurent Dufour <ldufour@...ux.vnet.ibm.com>
To: akpm@...ux-foundation.org, mhocko@...nel.org, peterz@...radead.org,
kirill@...temov.name, ak@...ux.intel.com, dave@...olabs.net,
jack@...e.cz, Matthew Wilcox <willy@...radead.org>,
khandual@...ux.vnet.ibm.com, aneesh.kumar@...ux.vnet.ibm.com,
benh@...nel.crashing.org, mpe@...erman.id.au, paulus@...ba.org,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, hpa@...or.com,
Will Deacon <will.deacon@....com>,
Sergey Senozhatsky <sergey.senozhatsky@...il.com>,
sergey.senozhatsky.work@...il.com,
Andrea Arcangeli <aarcange@...hat.com>,
Alexei Starovoitov <alexei.starovoitov@...il.com>,
kemi.wang@...el.com, Daniel Jordan <daniel.m.jordan@...cle.com>,
David Rientjes <rientjes@...gle.com>,
Jerome Glisse <jglisse@...hat.com>,
Ganesh Mahendran <opensource.ganesh@...il.com>,
Minchan Kim <minchan@...nel.org>,
Punit Agrawal <punitagrawal@...il.com>,
vinayak menon <vinayakm.list@...il.com>,
Yang Shi <yang.shi@...ux.alibaba.com>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
haren@...ux.vnet.ibm.com, npiggin@...il.com, bsingharora@...il.com,
paulmck@...ux.vnet.ibm.com, Tim Chen <tim.c.chen@...ux.intel.com>,
linuxppc-dev@...ts.ozlabs.org, x86@...nel.org
Subject: [PATCH v11 16/26] mm: introduce __vm_normal_page()
When handling a speculative page fault, we should use the value of the
VMA's flags that was cached in the vm_fault structure, rather than the
value currently stored in the VMA.

Currently, vm_normal_page() uses the pointer to the VMA to fetch the
vm_flags value. This patch introduces a new __vm_normal_page() which
takes the vm_flags value as a parameter instead.

Note: the speculative path is only enabled on architectures providing
support for the special PTE flag (CONFIG_ARCH_HAS_PTE_SPECIAL), so only
the first block of vm_normal_page() is used on the speculative path.
Signed-off-by: Laurent Dufour <ldufour@...ux.vnet.ibm.com>
---
include/linux/mm.h | 18 +++++++++++++++---
mm/memory.c | 21 ++++++++++++---------
2 files changed, 27 insertions(+), 12 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f385d721867d..bcebec117d4d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1317,9 +1317,21 @@ static inline void INIT_VMA(struct vm_area_struct *vma)
#endif
}
-struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
- pte_t pte, bool with_public_device);
-#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false)
+struct page *__vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte, bool with_public_device,
+ unsigned long vma_flags);
+static inline struct page *_vm_normal_page(struct vm_area_struct *vma,
+ unsigned long addr, pte_t pte,
+ bool with_public_device)
+{
+ return __vm_normal_page(vma, addr, pte, with_public_device,
+ vma->vm_flags);
+}
+static inline struct page *vm_normal_page(struct vm_area_struct *vma,
+ unsigned long addr, pte_t pte)
+{
+ return _vm_normal_page(vma, addr, pte, false);
+}
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);
diff --git a/mm/memory.c b/mm/memory.c
index deac7f12d777..cc4e6221ee7b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -780,7 +780,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
}
/*
- * vm_normal_page -- This function gets the "struct page" associated with a pte.
+ * __vm_normal_page -- This function gets the "struct page" associated with
+ * a pte.
*
* "Special" mappings do not wish to be associated with a "struct page" (either
* it doesn't exist, or it exists but they don't want to touch it). In this
@@ -821,8 +822,9 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
* PFNMAP mappings in order to support COWable mappings.
*
*/
-struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
- pte_t pte, bool with_public_device)
+struct page *__vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte, bool with_public_device,
+ unsigned long vma_flags)
{
unsigned long pfn = pte_pfn(pte);
@@ -831,7 +833,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
goto check_pfn;
if (vma->vm_ops && vma->vm_ops->find_special_page)
return vma->vm_ops->find_special_page(vma, addr);
- if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+ if (vma_flags & (VM_PFNMAP | VM_MIXEDMAP))
return NULL;
if (is_zero_pfn(pfn))
return NULL;
@@ -863,8 +865,8 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
- if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
- if (vma->vm_flags & VM_MIXEDMAP) {
+ if (unlikely(vma_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+ if (vma_flags & VM_MIXEDMAP) {
if (!pfn_valid(pfn))
return NULL;
goto out;
@@ -873,7 +875,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
off = (addr - vma->vm_start) >> PAGE_SHIFT;
if (pfn == vma->vm_pgoff + off)
return NULL;
- if (!is_cow_mapping(vma->vm_flags))
+ if (!is_cow_mapping(vma_flags))
return NULL;
}
}
@@ -2753,7 +2755,8 @@ static int do_wp_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
+ vmf->page = __vm_normal_page(vma, vmf->address, vmf->orig_pte, false,
+ vmf->vma_flags);
if (!vmf->page) {
/*
* VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
@@ -3863,7 +3866,7 @@ static int do_numa_page(struct vm_fault *vmf)
ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte);
update_mmu_cache(vma, vmf->address, vmf->pte);
- page = vm_normal_page(vma, vmf->address, pte);
+ page = __vm_normal_page(vma, vmf->address, pte, false, vmf->vma_flags);
if (!page) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
return 0;
--
2.7.4