Message-ID: <b61bcd10-df14-ff01-bae1-f4b99a8b97e2@oracle.com>
Date: Thu, 30 Mar 2023 10:29:12 -0700
From: Sidhartha Kumar <sidhartha.kumar@...cle.com>
To: Peng Zhang <zhangpeng362@...wei.com>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, akpm@...ux-foundation.org,
willy@...radead.org, mike.kravetz@...cle.com
Cc: vishal.moola@...il.com, muchun.song@...ux.dev,
wangkefeng.wang@...wei.com, sunnanyong@...wei.com
Subject: Re: [PATCH v4 4/6] userfaultfd: convert mfill_atomic_hugetlb() to use
a folio
On 3/30/23 6:40 AM, Peng Zhang wrote:
> From: ZhangPeng <zhangpeng362@...wei.com>
>
> Convert hugetlb_mfill_atomic_pte() to take in a folio pointer instead of
> a page pointer. Convert mfill_atomic_hugetlb() to use a folio.
>
> Signed-off-by: ZhangPeng <zhangpeng362@...wei.com>
> ---
> include/linux/hugetlb.h | 4 ++--
> mm/hugetlb.c | 26 +++++++++++++-------------
> mm/userfaultfd.c | 16 ++++++++--------
> 3 files changed, 23 insertions(+), 23 deletions(-)
>
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index 2a758bcd6719..28703fe22386 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -163,7 +163,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
> unsigned long dst_addr,
> unsigned long src_addr,
> uffd_flags_t flags,
> - struct page **pagep);
> + struct folio **foliop);
> #endif /* CONFIG_USERFAULTFD */
> bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
> struct vm_area_struct *vma,
> @@ -397,7 +397,7 @@ static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
> unsigned long dst_addr,
> unsigned long src_addr,
> uffd_flags_t flags,
> - struct page **pagep)
> + struct folio **foliop)
> {
> BUG();
> return 0;
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index aade1b513474..c88f856ec2e2 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -6178,7 +6178,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
> unsigned long dst_addr,
> unsigned long src_addr,
> uffd_flags_t flags,
> - struct page **pagep)
> + struct folio **foliop)
> {
> struct mm_struct *dst_mm = dst_vma->vm_mm;
> bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
> @@ -6201,8 +6201,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
> if (IS_ERR(folio))
> goto out;
> folio_in_pagecache = true;
> - } else if (!*pagep) {
> - /* If a page already exists, then it's UFFDIO_COPY for
> + } else if (!*foliop) {
> + /* If a folio already exists, then it's UFFDIO_COPY for
> * a non-missing case. Return -EEXIST.
> */
> if (vm_shared &&
> @@ -6237,33 +6237,33 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
> ret = -ENOMEM;
> goto out;
> }
> - *pagep = &folio->page;
> - /* Set the outparam pagep and return to the caller to
> + *foliop = folio;
> + /* Set the outparam foliop and return to the caller to
> * copy the contents outside the lock. Don't free the
> - * page.
> + * folio.
> */
> goto out;
> }
> } else {
> if (vm_shared &&
> hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
> - put_page(*pagep);
> + folio_put(*foliop);
> ret = -EEXIST;
> - *pagep = NULL;
> + *foliop = NULL;
> goto out;
> }
>
> folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
> if (IS_ERR(folio)) {
> - put_page(*pagep);
> + folio_put(*foliop);
> ret = -ENOMEM;
> - *pagep = NULL;
> + *foliop = NULL;
> goto out;
> }
> - copy_user_huge_page(&folio->page, *pagep, dst_addr, dst_vma,
> + copy_user_huge_page(&folio->page, &(*foliop)->page, dst_addr, dst_vma,
> pages_per_huge_page(h));
> - put_page(*pagep);
> - *pagep = NULL;
> + folio_put(*foliop);
> + *foliop = NULL;
> }
>
> /*
> diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> index b453a4d2a0d3..9e95af5ce2dd 100644
> --- a/mm/userfaultfd.c
> +++ b/mm/userfaultfd.c
> @@ -322,7 +322,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
> pte_t *dst_pte;
> unsigned long src_addr, dst_addr;
> long copied;
> - struct page *page;
> + struct folio *folio;
> unsigned long vma_hpagesize;
> pgoff_t idx;
> u32 hash;
> @@ -342,7 +342,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
> src_addr = src_start;
> dst_addr = dst_start;
> copied = 0;
> - page = NULL;
> + folio = NULL;
> vma_hpagesize = vma_kernel_pagesize(dst_vma);
>
> /*
> @@ -411,7 +411,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
> }
>
> err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
> - src_addr, flags, &page);
> + src_addr, flags, &folio);
>
> hugetlb_vma_unlock_read(dst_vma);
> mutex_unlock(&hugetlb_fault_mutex_table[hash]);
> @@ -420,9 +420,9 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
>
> if (unlikely(err == -ENOENT)) {
> mmap_read_unlock(dst_mm);
> - BUG_ON(!page);
> + BUG_ON(!folio);
>
> - err = copy_folio_from_user(page_folio(page),
> + err = copy_folio_from_user(folio,
> (const void __user *)src_addr, true);
> if (unlikely(err)) {
> err = -EFAULT;
> @@ -433,7 +433,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
> dst_vma = NULL;
> goto retry;
> } else
> - BUG_ON(page);
> + BUG_ON(folio);
>
> if (!err) {
> dst_addr += vma_hpagesize;
> @@ -450,8 +450,8 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
> out_unlock:
> mmap_read_unlock(dst_mm);
> out:
> - if (page)
> - put_page(page);
> + if (folio)
> + folio_put(folio);
> BUG_ON(copied < 0);
> BUG_ON(err > 0);
> BUG_ON(!copied && !err);
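
For anyone skimming the thread, the caller-visible contract after this conversion is roughly the sketch below. It is a simplified illustration pieced together from the mm/userfaultfd.c hunks above (locking, the retry loop, and the other error paths are trimmed), not the exact code:

	struct folio *folio = NULL;

	/* callee either installs the pte or hands back a folio to fill */
	err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
				       src_addr, flags, &folio);
	if (unlikely(err == -ENOENT)) {
		/* copy the user data into the folio outside the locks */
		err = copy_folio_from_user(folio,
					   (const void __user *)src_addr, true);
		/* ... then retake the locks and retry the install ... */
	}

	if (folio)
		folio_put(folio);	/* was put_page(page) before this patch */

With the outparam already a folio, the page_folio() call in the -ENOENT path goes away and the put_page() calls become straight folio_put() calls, which is a nice simplification.
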
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@...cle.com>