Message-ID: <561f3796-e308-82c2-ae1f-f7ef94fe0691@redhat.com>
Date: Fri, 16 Jun 2023 10:11:11 +0200
From: David Hildenbrand <david@...hat.com>
To: Peter Xu <peterx@...hat.com>, linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Cc: Matthew Wilcox <willy@...radead.org>,
Andrea Arcangeli <aarcange@...hat.com>,
John Hubbard <jhubbard@...dia.com>,
Mike Rapoport <rppt@...nel.org>,
Vlastimil Babka <vbabka@...e.cz>,
"Kirill A . Shutemov" <kirill@...temov.name>,
Andrew Morton <akpm@...ux-foundation.org>,
Mike Kravetz <mike.kravetz@...cle.com>,
James Houghton <jthoughton@...gle.com>,
Hugh Dickins <hughd@...gle.com>
Subject: Re: [PATCH 3/7] mm/hugetlb: Add page_mask for
hugetlb_follow_page_mask()
On 13.06.23 23:53, Peter Xu wrote:
> follow_page() doesn't need it, but we'll start to need it when unifying
> GUP for hugetlb.
>
> Signed-off-by: Peter Xu <peterx@...hat.com>
> ---
> include/linux/hugetlb.h | 8 +++++---
> mm/gup.c | 3 ++-
> mm/hugetlb.c | 4 +++-
> 3 files changed, 10 insertions(+), 5 deletions(-)
>
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index 21f942025fec..0d6f389d98de 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -131,7 +131,8 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
> int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
> struct vm_area_struct *, struct vm_area_struct *);
> struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
> - unsigned long address, unsigned int flags);
> + unsigned long address, unsigned int flags,
> + unsigned int *page_mask);
> long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
> struct page **, unsigned long *, unsigned long *,
> long, unsigned int, int *);
> @@ -297,8 +298,9 @@ static inline void adjust_range_if_pmd_sharing_possible(
> {
> }
>
> -static inline struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
> - unsigned long address, unsigned int flags)
> +static inline struct page *hugetlb_follow_page_mask(
> + struct vm_area_struct *vma, unsigned long address, unsigned int flags,
> + unsigned int *page_mask)
> {
> BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE */
> }
> diff --git a/mm/gup.c b/mm/gup.c
> index aa0668505d61..8d59ae4554e7 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -794,7 +794,8 @@ static struct page *follow_page_mask(struct vm_area_struct *vma,
> * Ordinary GUP uses follow_hugetlb_page for hugetlb processing.
> */
> if (is_vm_hugetlb_page(vma))
> - return hugetlb_follow_page_mask(vma, address, flags);
> + return hugetlb_follow_page_mask(vma, address, flags,
> + &ctx->page_mask);
>
> pgd = pgd_offset(mm, address);
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 9c261921b2cf..f037eaf9d819 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -6457,7 +6457,8 @@ static inline bool __follow_hugetlb_must_fault(struct vm_area_struct *vma,
> }
>
> struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
> - unsigned long address, unsigned int flags)
> + unsigned long address, unsigned int flags,
> + unsigned int *page_mask)
> {
> struct hstate *h = hstate_vma(vma);
> struct mm_struct *mm = vma->vm_mm;
> @@ -6506,6 +6507,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
> * because we hold the ptl lock and have verified pte_present().
> */
> WARN_ON_ONCE(try_grab_page(page, flags));
> + *page_mask = (1U << huge_page_order(h)) - 1;
> }
> out:
> spin_unlock(ptl);
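
For context: the new *page_mask output feeds ctx->page_mask in the
generic GUP path, where __get_user_pages() uses it to advance over a
whole huge page in one iteration instead of once per base page. Below
is a standalone sketch of that arithmetic, simplified and not the
mainline code; page_increm() here stands in for the inline computation
in mm/gup.c:

#include <stdio.h>

#define PAGE_SHIFT 12

/*
 * page_mask is the number of base pages in the huge page minus one,
 * e.g. (1U << 9) - 1 = 511 for a 2M huge page with 4K base pages.
 * Note it is a subpage-index mask, not a byte-address mask like
 * huge_page_mask() would give.
 */
static unsigned long page_increm(unsigned long start, unsigned int page_mask)
{
	/* Base pages left in this huge page, counting the current one. */
	return 1 + (~(start >> PAGE_SHIFT) & page_mask);
}

int main(void)
{
	unsigned int page_mask = (1U << 9) - 1;     /* 2M page, order 9 */
	unsigned long addr = 0x200000UL + 3 * 4096; /* 3 subpages in */

	/* Prints 509: the rest of the huge page is covered in one step. */
	printf("%lu\n", page_increm(addr, page_mask));
	return 0;
}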
Reviewed-by: David Hildenbrand <david@...hat.com>
--
Cheers,
David / dhildenb