Message-ID: <CAHbLzkqN0L9S3shxU-2PJBeqbKwt4knFN2_Nh-tAcfq=QfLJDw@mail.gmail.com>
Date: Fri, 30 Jul 2021 14:57:03 -0700
From: Yang Shi <shy828301@...il.com>
To: Hugh Dickins <hughd@...gle.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Shakeel Butt <shakeelb@...gle.com>,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
Miaohe Lin <linmiaohe@...wei.com>,
Mike Kravetz <mike.kravetz@...cle.com>,
Michal Hocko <mhocko@...e.com>,
Rik van Riel <riel@...riel.com>,
Christoph Hellwig <hch@...radead.org>,
Matthew Wilcox <willy@...radead.org>,
"Eric W. Biederman" <ebiederm@...ssion.com>,
Alexey Gladkov <legion@...nel.org>,
Chris Wilson <chris@...is-wilson.co.uk>,
Matthew Auld <matthew.auld@...el.com>,
Linux FS-devel Mailing List <linux-fsdevel@...r.kernel.org>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
linux-api@...r.kernel.org, Linux MM <linux-mm@...ck.org>
Subject: Re: [PATCH 05/16] huge tmpfs: move shmem_huge_enabled() upwards
On Fri, Jul 30, 2021 at 12:39 AM Hugh Dickins <hughd@...gle.com> wrote:
>
> shmem_huge_enabled() is about to be enhanced into shmem_is_huge(),
> so that it can be used more widely throughout: before making functional
> changes, shift it to its final position (to avoid forward declaration).
>
> Signed-off-by: Hugh Dickins <hughd@...gle.com>
Reviewed-by: Yang Shi <shy828301@...il.com>
> ---
> mm/shmem.c | 72 ++++++++++++++++++++++++++----------------------------
> 1 file changed, 35 insertions(+), 37 deletions(-)
>
> diff --git a/mm/shmem.c b/mm/shmem.c
> index c6fa6f4f2db8..740d48ef1eb5 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -476,6 +476,41 @@ static bool shmem_confirm_swap(struct address_space *mapping,
>
> static int shmem_huge __read_mostly;
>
> +bool shmem_huge_enabled(struct vm_area_struct *vma)
> +{
> +	struct inode *inode = file_inode(vma->vm_file);
> +	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
> +	loff_t i_size;
> +	pgoff_t off;
> +
> +	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
> +	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
> +		return false;
> +	if (shmem_huge == SHMEM_HUGE_FORCE)
> +		return true;
> +	if (shmem_huge == SHMEM_HUGE_DENY)
> +		return false;
> +	switch (sbinfo->huge) {
> +	case SHMEM_HUGE_NEVER:
> +		return false;
> +	case SHMEM_HUGE_ALWAYS:
> +		return true;
> +	case SHMEM_HUGE_WITHIN_SIZE:
> +		off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
> +		i_size = round_up(i_size_read(inode), PAGE_SIZE);
> +		if (i_size >= HPAGE_PMD_SIZE &&
> +		    i_size >> PAGE_SHIFT >= off)
> +			return true;
> +		fallthrough;
> +	case SHMEM_HUGE_ADVISE:
> +		/* TODO: implement fadvise() hints */
> +		return (vma->vm_flags & VM_HUGEPAGE);
> +	default:
> +		VM_BUG_ON(1);
> +		return false;
> +	}
> +}
> +
> #if defined(CONFIG_SYSFS)
> static int shmem_parse_huge(const char *str)
> {
> @@ -3995,43 +4030,6 @@ struct kobj_attribute shmem_enabled_attr =
> __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
> #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
>
> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> -bool shmem_huge_enabled(struct vm_area_struct *vma)
> -{
> -	struct inode *inode = file_inode(vma->vm_file);
> -	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
> -	loff_t i_size;
> -	pgoff_t off;
> -
> -	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
> -	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
> -		return false;
> -	if (shmem_huge == SHMEM_HUGE_FORCE)
> -		return true;
> -	if (shmem_huge == SHMEM_HUGE_DENY)
> -		return false;
> -	switch (sbinfo->huge) {
> -	case SHMEM_HUGE_NEVER:
> -		return false;
> -	case SHMEM_HUGE_ALWAYS:
> -		return true;
> -	case SHMEM_HUGE_WITHIN_SIZE:
> -		off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
> -		i_size = round_up(i_size_read(inode), PAGE_SIZE);
> -		if (i_size >= HPAGE_PMD_SIZE &&
> -		    i_size >> PAGE_SHIFT >= off)
> -			return true;
> -		fallthrough;
> -	case SHMEM_HUGE_ADVISE:
> -		/* TODO: implement fadvise() hints */
> -		return (vma->vm_flags & VM_HUGEPAGE);
> -	default:
> -		VM_BUG_ON(1);
> -		return false;
> -	}
> -}
> -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
> -
> #else /* !CONFIG_SHMEM */
>
> /*
> --
> 2.26.2
>
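
For anyone reading along, the SHMEM_HUGE_WITHIN_SIZE arithmetic in the function
above is the only non-obvious part of the code being moved. The sketch below is
not part of the patch; it is just an illustrative userspace replay of that
check. The helper name within_size_ok() is mine, and I assume the usual x86_64
constants (4K base pages, 2M PMD huge pages) plus a simplified round_up() that
only handles power-of-two multiples, which is all this check needs.

#include <stdbool.h>
#include <stdio.h>

/* Typical x86_64 values, hard-coded here purely for illustration. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)		/* 4096 */
#define HPAGE_PMD_NR	512ULL				/* 2M / 4K */
#define HPAGE_PMD_SIZE	(HPAGE_PMD_NR << PAGE_SHIFT)	/* 2M */

/* Simplified round_up(): 'to' must be a power of two, as both uses are here. */
#define round_up(x, to)	((((x) - 1) | ((to) - 1)) + 1)

/* Replays the SHMEM_HUGE_WITHIN_SIZE test from shmem_huge_enabled(). */
static bool within_size_ok(unsigned long long vm_pgoff,
			   unsigned long long inode_bytes)
{
	unsigned long long off = round_up(vm_pgoff, HPAGE_PMD_NR);
	unsigned long long i_size = round_up(inode_bytes, PAGE_SIZE);

	return i_size >= HPAGE_PMD_SIZE && (i_size >> PAGE_SHIFT) >= off;
}

int main(void)
{
	/* 3M file, mapping starts at page offset 0: covers a full PMD -> 1 */
	printf("%d\n", within_size_ok(0, 3ULL << 20));
	/* 1M file: smaller than one PMD huge page -> 0 */
	printf("%d\n", within_size_ok(0, 1ULL << 20));
	return 0;
}

In words: the file must be at least one PMD huge page long, and the mapping's
start offset, rounded up to the next PMD boundary, must not extend beyond the
page-rounded i_size, before WITHIN_SIZE allows a huge page.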