Message-ID: <10e58e7e-a52e-751d-f693-cd4e05ac10ca@linux.dev>
Date: Mon, 22 May 2023 19:41:35 +0800
From: Qi Zheng <qi.zheng@...ux.dev>
To: Hugh Dickins <hughd@...gle.com>,
Andrew Morton <akpm@...ux-foundation.org>
Cc: Mike Kravetz <mike.kravetz@...cle.com>,
Mike Rapoport <rppt@...nel.org>,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
Matthew Wilcox <willy@...radead.org>,
David Hildenbrand <david@...hat.com>,
Suren Baghdasaryan <surenb@...gle.com>,
Qi Zheng <zhengqi.arch@...edance.com>,
Yang Shi <shy828301@...il.com>,
Mel Gorman <mgorman@...hsingularity.net>,
Peter Xu <peterx@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Will Deacon <will@...nel.org>, Yu Zhao <yuzhao@...gle.com>,
Alistair Popple <apopple@...dia.com>,
Ralph Campbell <rcampbell@...dia.com>,
Ira Weiny <ira.weiny@...el.com>,
Steven Price <steven.price@....com>,
SeongJae Park <sj@...nel.org>,
Naoya Horiguchi <naoya.horiguchi@....com>,
Christophe Leroy <christophe.leroy@...roup.eu>,
Zack Rusin <zackr@...are.com>, Jason Gunthorpe <jgg@...pe.ca>,
Axel Rasmussen <axelrasmussen@...gle.com>,
Anshuman Khandual <anshuman.khandual@....com>,
Pasha Tatashin <pasha.tatashin@...een.com>,
Miaohe Lin <linmiaohe@...wei.com>,
Minchan Kim <minchan@...nel.org>,
Christoph Hellwig <hch@...radead.org>,
Song Liu <song@...nel.org>,
Thomas Hellstrom <thomas.hellstrom@...ux.intel.com>,
linux-kernel@...r.kernel.org, linux-mm@...ck.org
Subject: Re: [PATCH 08/31] mm/page_vma_mapped: pte_offset_map_nolock() not
pte_lockptr()
On 2023/5/22 12:58, Hugh Dickins wrote:
> map_pte() uses pte_offset_map_nolock(), to make sure of the ptl belonging
> to the pte, even if the pmd entry is then changed racily:
> page_vma_mapped_walk() uses that instead of getting pte_lockptr() later,
> or restarts if map_pte() found no page table.
>
> Signed-off-by: Hugh Dickins <hughd@...gle.com>
> ---
> mm/page_vma_mapped.c | 28 ++++++++++++++++++++++------
> 1 file changed, 22 insertions(+), 6 deletions(-)
>
> diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
> index 947dc7491815..2af734274073 100644
> --- a/mm/page_vma_mapped.c
> +++ b/mm/page_vma_mapped.c
> @@ -13,16 +13,28 @@ static inline bool not_found(struct page_vma_mapped_walk *pvmw)
> return false;
> }
>
> -static bool map_pte(struct page_vma_mapped_walk *pvmw)
> +static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
> {
> if (pvmw->flags & PVMW_SYNC) {
> /* Use the stricter lookup */
> pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
> pvmw->address, &pvmw->ptl);
> - return true;
> + *ptlp = pvmw->ptl;
> + return !!pvmw->pte;
> }
>
> - pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
> + /*
> + * It is important to return the ptl corresponding to pte,
> + * in case *pvmw->pmd changes underneath us; so we need to
> + * return it even when choosing not to lock, in case caller
> + * proceeds to loop over next ptes, and finds a match later.
> + * Though, in most cases, page lock already protects this.
> + */
> + pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
> + pvmw->address, ptlp);
> + if (!pvmw->pte)
> + return false;
> +
> if (pvmw->flags & PVMW_MIGRATION) {
> if (!is_swap_pte(*pvmw->pte))
> return false;
> @@ -51,7 +63,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
> } else if (!pte_present(*pvmw->pte)) {
> return false;
> }
> - pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
> + pvmw->ptl = *ptlp;
> spin_lock(pvmw->ptl);
> return true;
> }
> @@ -156,6 +168,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
> struct vm_area_struct *vma = pvmw->vma;
> struct mm_struct *mm = vma->vm_mm;
> unsigned long end;
> + spinlock_t *ptl;
> pgd_t *pgd;
> p4d_t *p4d;
> pud_t *pud;
> @@ -257,8 +270,11 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
> step_forward(pvmw, PMD_SIZE);
> continue;
> }
> - if (!map_pte(pvmw))
> + if (!map_pte(pvmw, &ptl)) {
> + if (!pvmw->pte)
> + goto restart;
Could pvmw->pmd itself have changed? If not, how about just jumping to a
retry label added below, instead of going all the way back to restart:
@@ -205,6 +205,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 		}
 
 		pvmw->pmd = pmd_offset(pud, pvmw->address);
+
+retry:
 		/*
 		 * Make sure the pmd value isn't cached in a register by the
 		 * compiler and used as a stale value after we've observed a
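
To make the suggestion concrete, the failure path in the hunk below would
then become something like this (only a sketch of the idea, not a tested
change):

	if (!map_pte(pvmw, &ptl)) {
		if (!pvmw->pte)
			goto retry;	/* re-read the pmd entry and map again */
		goto next_pte;
	}
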
> goto next_pte;
> + }
> this_pte:
> if (check_pte(pvmw))
> return true;
> @@ -281,7 +297,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
> } while (pte_none(*pvmw->pte));
>
> if (!pvmw->ptl) {
> - pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
> + pvmw->ptl = ptl;
> spin_lock(pvmw->ptl);
> }
> goto this_pte;
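
For reference, the locking pattern the patch relies on boils down to
something like the sketch below (example_lookup() and its surroundings are
made up here only to illustrate the pte_offset_map_nolock() + deferred
spin_lock() sequence; it is not code from the patch):

#include <linux/mm.h>
#include <linux/spinlock.h>

/*
 * Map the page table without taking its lock, but remember the ptl that
 * pte_offset_map_nolock() reports for the table actually mapped; only
 * take that lock once a candidate pte is found.  A NULL return means
 * there was no page table (or the pmd changed under us), so the caller
 * has to restart its walk.
 */
static bool example_lookup(struct mm_struct *mm, pmd_t *pmd,
			   unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_nolock(mm, pmd, addr, &ptl);
	if (!pte)
		return false;		/* no page table: restart the walk */

	if (pte_none(*pte)) {		/* nothing of interest here */
		pte_unmap(pte);
		return false;
	}

	spin_lock(ptl);			/* ptl matches the table we mapped */
	/* re-check *pte under the lock before acting on it */
	spin_unlock(ptl);
	pte_unmap(pte);
	return true;
}
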
--
Thanks,
Qi