Message-ID: <CAHbLzkoKZ9OdUfP5DX81CKOJWrRZ0GANrmenNeKWNmSOgUh0bQ@mail.gmail.com>
Date: Fri, 30 Jul 2021 16:34:09 -0700
From: Yang Shi <shy828301@...il.com>
To: Hugh Dickins <hughd@...gle.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Shakeel Butt <shakeelb@...gle.com>,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
Miaohe Lin <linmiaohe@...wei.com>,
Mike Kravetz <mike.kravetz@...cle.com>,
Michal Hocko <mhocko@...e.com>,
Rik van Riel <riel@...riel.com>,
Christoph Hellwig <hch@...radead.org>,
Matthew Wilcox <willy@...radead.org>,
"Eric W. Biederman" <ebiederm@...ssion.com>,
Alexey Gladkov <legion@...nel.org>,
Chris Wilson <chris@...is-wilson.co.uk>,
Matthew Auld <matthew.auld@...el.com>,
Linux FS-devel Mailing List <linux-fsdevel@...r.kernel.org>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
linux-api@...r.kernel.org, Linux MM <linux-mm@...ck.org>
Subject: Re: [PATCH 06/16] huge tmpfs: shmem_is_huge(vma, inode, index)
On Fri, Jul 30, 2021 at 12:42 AM Hugh Dickins <hughd@...gle.com> wrote:
>
> Extend shmem_huge_enabled(vma) to shmem_is_huge(vma, inode, index), so
> that a consistent set of checks can be applied, even when the inode is
> accessed through read/write syscalls (with NULL vma) instead of mmaps
> (the index argument is seldom of interest, but required by mount option
> "huge=within_size"). Clean up and rearrange the checks a little.
>
> This then replaces the checks which shmem_fault() and shmem_getpage_gfp()
> were making, and eliminates the SGP_HUGE and SGP_NOHUGE modes: while it's
> still true that khugepaged's collapse_file() at that point wants a small
> page, the race that might allocate it a huge page is too unlikely to be
> worth optimizing against (we are there *because* there was at least one
> small page in the way), and handled by a later PageTransCompound check.
Yes, it seems too unlikely. But if it does happen, the PageTransCompound
check may not be good enough, since the page allocated by
shmem_getpage() may be charged to the wrong memcg (the root memcg). And
it won't be replaced by a newly allocated huge page, so the wrong charge
can't be undone.
And another question: it seems the newly allocated huge page will just
be uncharged, not freed, until "khugepaged_pages_to_scan" pages have
been scanned. khugepaged_prealloc_page() is called to free the allocated
huge page before each call to khugepaged_scan_mm_slot(). But
khugepaged_scan_file() -> collapse_file() -> khugepaged_alloc_page()
may be called multiple times within the loop in khugepaged_scan_mm_slot(),
so khugepaged_alloc_page() may see that leftover page and trigger the
VM_BUG_ON, IIUC.
The code is quite convoluted; I'm not sure whether I'm missing something
or not. And this problem seems very hard to trigger with real-life
workloads.
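
To make the second concern concrete, here is a tiny userspace model of
the pattern I mean (not kernel code; the names prealloc/alloc_huge/
collapse are made-up stand-ins, and this is only my simplified reading
of the flow): the free step runs once before each outer scan, while the
inner loop may allocate more than once, so a failure path that only
"uncharges" and leaves the pointer set would trip the assert on the
next allocation:

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

/* Toy stand-in for the preallocated huge page pointer (*hpage). */
static void *hpage;

/* Models khugepaged_prealloc_page(): drop any leftover page. */
static void prealloc(void)
{
	free(hpage);
	hpage = NULL;
}

/* Models khugepaged_alloc_page(): expects the pointer to be NULL. */
static void *alloc_huge(void)
{
	assert(hpage == NULL);	/* stands in for VM_BUG_ON_PAGE(*hpage) */
	hpage = malloc(1);
	return hpage;
}

/*
 * Models one collapse attempt.  On failure the page is "uncharged"
 * but neither freed nor cleared, so hpage stays non-NULL.
 */
static bool collapse(bool fail)
{
	alloc_huge();
	if (fail)
		return false;	/* failure path: page left behind */
	free(hpage);
	hpage = NULL;		/* success path consumes the page */
	return true;
}

int main(void)
{
	prealloc();		/* once before the scan loop */
	collapse(true);		/* first attempt in the loop fails */
	collapse(false);	/* second attempt: assert fires here */
	return 0;
}

If the real code always bails out of the scan loop after a failed
collapse, then of course this toy case can't happen; that's the part
I'm not sure about.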
>
> Replace a couple of 0s by explicit SHMEM_HUGE_NEVERs; and replace the
> obscure !shmem_mapping() symlink check by explicit S_ISLNK() - nothing
> else needs that symlink check, so leave it there in shmem_getpage_gfp().
>
> Signed-off-by: Hugh Dickins <hughd@...gle.com>
> ---
> include/linux/shmem_fs.h | 9 +++--
> mm/khugepaged.c | 2 +-
> mm/shmem.c | 84 ++++++++++++----------------------------
> 3 files changed, 32 insertions(+), 63 deletions(-)
>
> diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
> index 9b7f7ac52351..3b05a28e34c4 100644
> --- a/include/linux/shmem_fs.h
> +++ b/include/linux/shmem_fs.h
> @@ -86,7 +86,12 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
> extern int shmem_unuse(unsigned int type, bool frontswap,
> unsigned long *fs_pages_to_unuse);
>
> -extern bool shmem_huge_enabled(struct vm_area_struct *vma);
> +extern bool shmem_is_huge(struct vm_area_struct *vma,
> + struct inode *inode, pgoff_t index);
> +static inline bool shmem_huge_enabled(struct vm_area_struct *vma)
> +{
> + return shmem_is_huge(vma, file_inode(vma->vm_file), vma->vm_pgoff);
> +}
> extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
> extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
> pgoff_t start, pgoff_t end);
> @@ -95,8 +100,6 @@ extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
> enum sgp_type {
> SGP_READ, /* don't exceed i_size, don't allocate page */
> SGP_CACHE, /* don't exceed i_size, may allocate page */
> - SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */
> - SGP_HUGE, /* like SGP_CACHE, huge pages preferred */
> SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */
> SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */
> };
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index b0412be08fa2..cecb19c3e965 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -1721,7 +1721,7 @@ static void collapse_file(struct mm_struct *mm,
> xas_unlock_irq(&xas);
> /* swap in or instantiate fallocated page */
> if (shmem_getpage(mapping->host, index, &page,
> - SGP_NOHUGE)) {
> + SGP_CACHE)) {
> result = SCAN_FAIL;
> goto xa_unlocked;
> }
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 740d48ef1eb5..6def7391084c 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -474,39 +474,35 @@ static bool shmem_confirm_swap(struct address_space *mapping,
> #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> /* ifdef here to avoid bloating shmem.o when not necessary */
>
> -static int shmem_huge __read_mostly;
> +static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
>
> -bool shmem_huge_enabled(struct vm_area_struct *vma)
> +bool shmem_is_huge(struct vm_area_struct *vma,
> + struct inode *inode, pgoff_t index)
> {
> - struct inode *inode = file_inode(vma->vm_file);
> - struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
> loff_t i_size;
> - pgoff_t off;
>
> - if ((vma->vm_flags & VM_NOHUGEPAGE) ||
> - test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
> - return false;
> - if (shmem_huge == SHMEM_HUGE_FORCE)
> - return true;
> if (shmem_huge == SHMEM_HUGE_DENY)
> return false;
> - switch (sbinfo->huge) {
> - case SHMEM_HUGE_NEVER:
> + if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
> + test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
> return false;
> + if (shmem_huge == SHMEM_HUGE_FORCE)
> + return true;
> +
> + switch (SHMEM_SB(inode->i_sb)->huge) {
> case SHMEM_HUGE_ALWAYS:
> return true;
> case SHMEM_HUGE_WITHIN_SIZE:
> - off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
> + index = round_up(index, HPAGE_PMD_NR);
> i_size = round_up(i_size_read(inode), PAGE_SIZE);
> - if (i_size >= HPAGE_PMD_SIZE &&
> - i_size >> PAGE_SHIFT >= off)
> + if (i_size >= HPAGE_PMD_SIZE && (i_size >> PAGE_SHIFT) >= index)
> return true;
> fallthrough;
> case SHMEM_HUGE_ADVISE:
> - /* TODO: implement fadvise() hints */
> - return (vma->vm_flags & VM_HUGEPAGE);
> + if (vma && (vma->vm_flags & VM_HUGEPAGE))
> + return true;
> + fallthrough;
> default:
> - VM_BUG_ON(1);
> return false;
> }
> }
> @@ -680,6 +676,12 @@ static long shmem_unused_huge_count(struct super_block *sb,
>
> #define shmem_huge SHMEM_HUGE_DENY
>
> +bool shmem_is_huge(struct vm_area_struct *vma,
> + struct inode *inode, pgoff_t index)
> +{
> + return false;
> +}
> +
> static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
> struct shrink_control *sc, unsigned long nr_to_split)
> {
> @@ -1829,7 +1831,6 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
> struct shmem_sb_info *sbinfo;
> struct mm_struct *charge_mm;
> struct page *page;
> - enum sgp_type sgp_huge = sgp;
> pgoff_t hindex = index;
> gfp_t huge_gfp;
> int error;
> @@ -1838,8 +1839,6 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
>
> if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
> return -EFBIG;
> - if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
> - sgp = SGP_CACHE;
> repeat:
> if (sgp <= SGP_CACHE &&
> ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
> @@ -1898,36 +1897,12 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
> return 0;
> }
>
> - /* shmem_symlink() */
> - if (!shmem_mapping(mapping))
> - goto alloc_nohuge;
> - if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
> + /* Never use a huge page for shmem_symlink() */
> + if (S_ISLNK(inode->i_mode))
> goto alloc_nohuge;
> - if (shmem_huge == SHMEM_HUGE_FORCE)
> - goto alloc_huge;
> - switch (sbinfo->huge) {
> - case SHMEM_HUGE_NEVER:
> + if (!shmem_is_huge(vma, inode, index))
> goto alloc_nohuge;
> - case SHMEM_HUGE_WITHIN_SIZE: {
> - loff_t i_size;
> - pgoff_t off;
> -
> - off = round_up(index, HPAGE_PMD_NR);
> - i_size = round_up(i_size_read(inode), PAGE_SIZE);
> - if (i_size >= HPAGE_PMD_SIZE &&
> - i_size >> PAGE_SHIFT >= off)
> - goto alloc_huge;
>
> - fallthrough;
> - }
> - case SHMEM_HUGE_ADVISE:
> - if (sgp_huge == SGP_HUGE)
> - goto alloc_huge;
> - /* TODO: implement fadvise() hints */
> - goto alloc_nohuge;
> - }
> -
> -alloc_huge:
> huge_gfp = vma_thp_gfp_mask(vma);
> huge_gfp = limit_gfp_mask(huge_gfp, gfp);
> page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true);
> @@ -2083,7 +2058,6 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
> struct vm_area_struct *vma = vmf->vma;
> struct inode *inode = file_inode(vma->vm_file);
> gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
> - enum sgp_type sgp;
> int err;
> vm_fault_t ret = VM_FAULT_LOCKED;
>
> @@ -2146,15 +2120,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
> spin_unlock(&inode->i_lock);
> }
>
> - sgp = SGP_CACHE;
> -
> - if ((vma->vm_flags & VM_NOHUGEPAGE) ||
> - test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
> - sgp = SGP_NOHUGE;
> - else if (vma->vm_flags & VM_HUGEPAGE)
> - sgp = SGP_HUGE;
> -
> - err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
> + err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, SGP_CACHE,
> gfp, vma, vmf, &ret);
> if (err)
> return vmf_error(err);
> @@ -3961,7 +3927,7 @@ int __init shmem_init(void)
> if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
> SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
> else
> - shmem_huge = 0; /* just in case it was patched */
> + shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
> #endif
> return 0;
>
> --
> 2.26.2
>