Message-ID: <20190422201504.GG14666@redhat.com>
Date: Mon, 22 Apr 2019 16:15:05 -0400
From: Jerome Glisse <jglisse@...hat.com>
To: Laurent Dufour <ldufour@...ux.ibm.com>
Cc: akpm@...ux-foundation.org, mhocko@...nel.org, peterz@...radead.org,
kirill@...temov.name, ak@...ux.intel.com, dave@...olabs.net,
jack@...e.cz, Matthew Wilcox <willy@...radead.org>,
aneesh.kumar@...ux.ibm.com, benh@...nel.crashing.org,
mpe@...erman.id.au, paulus@...ba.org,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, hpa@...or.com,
Will Deacon <will.deacon@....com>,
Sergey Senozhatsky <sergey.senozhatsky@...il.com>,
sergey.senozhatsky.work@...il.com,
Andrea Arcangeli <aarcange@...hat.com>,
Alexei Starovoitov <alexei.starovoitov@...il.com>,
kemi.wang@...el.com, Daniel Jordan <daniel.m.jordan@...cle.com>,
David Rientjes <rientjes@...gle.com>,
Ganesh Mahendran <opensource.ganesh@...il.com>,
Minchan Kim <minchan@...nel.org>,
Punit Agrawal <punitagrawal@...il.com>,
vinayak menon <vinayakm.list@...il.com>,
Yang Shi <yang.shi@...ux.alibaba.com>,
zhong jiang <zhongjiang@...wei.com>,
Haiyan Song <haiyanx.song@...el.com>,
Balbir Singh <bsingharora@...il.com>, sj38.park@...il.com,
Michel Lespinasse <walken@...gle.com>,
Mike Rapoport <rppt@...ux.ibm.com>,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
haren@...ux.vnet.ibm.com, npiggin@...il.com,
paulmck@...ux.vnet.ibm.com, Tim Chen <tim.c.chen@...ux.intel.com>,
linuxppc-dev@...ts.ozlabs.org, x86@...nel.org
Subject: Re: [PATCH v12 16/31] mm: introduce __vm_normal_page()
On Tue, Apr 16, 2019 at 03:45:07PM +0200, Laurent Dufour wrote:
> When dealing with the speculative fault path, we should use the VMA's
> vm_flags value that was cached in the vm_fault structure.
>
> Currently vm_normal_page() uses the VMA pointer to fetch the vm_flags
> value. This patch provides a new __vm_normal_page() which receives the
> vm_flags value as a parameter.
>
> Note: the speculative path is only enabled on architectures providing
> support for the special PTE flag, so only the first block of
> vm_normal_page() is used during the speculative path.
>
> Signed-off-by: Laurent Dufour <ldufour@...ux.ibm.com>
Reviewed-by: Jérôme Glisse <jglisse@...hat.com>
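
For readers following the series: as I read it, the point of passing
vm_flags explicitly is that the speculative path works on a snapshot
taken while the VMA was known to be stable, instead of re-reading
vma->vm_flags, which can change underneath us when mmap_sem is not
held. A rough sketch of the two calling patterns (illustrative only,
not the literal series code):

	/* Regular fault path: mmap_sem held, vma->vm_flags is stable. */
	page = vm_normal_page(vma, addr, pte);

	/* Speculative fault path: no mmap_sem, so use the value that
	 * was snapshotted into the vm_fault structure when the fault
	 * started (vmf->vma_flags, as the hunks below use it). */
	page = __vm_normal_page(vma, addr, pte, false, vmf->vma_flags);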
> ---
> include/linux/mm.h | 18 +++++++++++++++---
> mm/memory.c | 21 ++++++++++++---------
> 2 files changed, 27 insertions(+), 12 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index f465bb2b049e..f14b2c9ddfd4 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -1421,9 +1421,21 @@ static inline void INIT_VMA(struct vm_area_struct *vma)
> #endif
> }
>
> -struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
> - pte_t pte, bool with_public_device);
> -#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false)
> +struct page *__vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
> + pte_t pte, bool with_public_device,
> + unsigned long vma_flags);
> +static inline struct page *_vm_normal_page(struct vm_area_struct *vma,
> + unsigned long addr, pte_t pte,
> + bool with_public_device)
> +{
> + return __vm_normal_page(vma, addr, pte, with_public_device,
> + vma->vm_flags);
> +}
> +static inline struct page *vm_normal_page(struct vm_area_struct *vma,
> + unsigned long addr, pte_t pte)
> +{
> + return _vm_normal_page(vma, addr, pte, false);
> +}
>
> struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
> pmd_t pmd);
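
A small note on the header change: the old #define becomes a pair of
static inlines, so existing callers keep their exact semantics while
also gaining argument type checking. The layering, as the header now
expresses it:

	vm_normal_page(vma, addr, pte)
	  == _vm_normal_page(vma, addr, pte, false)
	  == __vm_normal_page(vma, addr, pte, false, vma->vm_flags)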
> diff --git a/mm/memory.c b/mm/memory.c
> index 85ec5ce5c0a8..be93f2c8ebe0 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -533,7 +533,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
> }
>
> /*
> - * vm_normal_page -- This function gets the "struct page" associated with a pte.
> + * __vm_normal_page -- This function gets the "struct page" associated with
> + * a pte.
> *
> * "Special" mappings do not wish to be associated with a "struct page" (either
> * it doesn't exist, or it exists but they don't want to touch it). In this
> @@ -574,8 +575,9 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
> * PFNMAP mappings in order to support COWable mappings.
> *
> */
> -struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
> - pte_t pte, bool with_public_device)
> +struct page *__vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
> + pte_t pte, bool with_public_device,
> + unsigned long vma_flags)
> {
> unsigned long pfn = pte_pfn(pte);
>
> @@ -584,7 +586,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
> goto check_pfn;
> if (vma->vm_ops && vma->vm_ops->find_special_page)
> return vma->vm_ops->find_special_page(vma, addr);
> - if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
> + if (vma_flags & (VM_PFNMAP | VM_MIXEDMAP))
> return NULL;
> if (is_zero_pfn(pfn))
> return NULL;
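
This hunk is the "first block" the commit note refers to: with
CONFIG_ARCH_HAS_PTE_SPECIAL, pte_special() plus the passed-in vma_flags
decide everything, so a speculative fault never depends on a fresh
vma->vm_flags read. Roughly (control-flow sketch, not the literal
code):

	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
		/* A speculative fault only ever runs this block: the
		 * only flag input is the cached vma_flags, plus the
		 * vma->vm_ops pointer, which the series keeps valid
		 * for the duration of the speculative fault. */
		...
	}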
> @@ -620,8 +622,8 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
>
> /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
>
> - if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
> - if (vma->vm_flags & VM_MIXEDMAP) {
> + if (unlikely(vma_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
> + if (vma_flags & VM_MIXEDMAP) {
> if (!pfn_valid(pfn))
> return NULL;
> goto out;
> @@ -630,7 +632,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
> off = (addr - vma->vm_start) >> PAGE_SHIFT;
> if (pfn == vma->vm_pgoff + off)
> return NULL;
> - if (!is_cow_mapping(vma->vm_flags))
> + if (!is_cow_mapping(vma_flags))
> return NULL;
> }
> }
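
This second block keeps direct vma->vm_start / vma->vm_pgoff reads,
which looks fine given the note above: the speculative path depends on
the special PTE flag, so this !CONFIG_ARCH_HAS_PTE_SPECIAL block is
never reached speculatively, and converting its vm_flags reads is for
consistency. For reference, the COW check it performs: a pure VM_PFNMAP
mapping has no struct page unless the page was COWed, and a COWed page
no longer sits at the linear pfn offset:

	/* remap_pfn_range() lays pfns out linearly; a pte that still
	 * matches the linear layout is the original special mapping. */
	off = (addr - vma->vm_start) >> PAGE_SHIFT;
	if (pfn == vma->vm_pgoff + off)
		return NULL;	/* not a "normal" page */
	if (!is_cow_mapping(vma_flags))
		return NULL;	/* shared PFNMAP: never normal */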
> @@ -2532,7 +2534,8 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
> {
> struct vm_area_struct *vma = vmf->vma;
>
> - vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
> + vmf->page = __vm_normal_page(vma, vmf->address, vmf->orig_pte, false,
> + vmf->vma_flags);
> if (!vmf->page) {
> /*
> * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
> @@ -3706,7 +3709,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
> ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
> update_mmu_cache(vma, vmf->address, vmf->pte);
>
> - page = vm_normal_page(vma, vmf->address, pte);
> + page = __vm_normal_page(vma, vmf->address, pte, false, vmf->vma_flags);
> if (!page) {
> pte_unmap_unlock(vmf->pte, vmf->ptl);
> return 0;
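
One more observation: both conversions (do_wp_page() above and
do_numa_page() here) pass "false" for with_public_device, which just
open-codes what the old vm_normal_page() macro did, so device-public
page behaviour is unchanged:

	/* old: */ vm_normal_page(vma, vmf->address, pte)
	/*	== _vm_normal_page(vma, vmf->address, pte, false) */
	/* new: */ __vm_normal_page(vma, vmf->address, pte, false,
				    vmf->vma_flags)

i.e. the only behavioural change is where the vm_flags value comes
from.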
> --
> 2.21.0
>