Message-ID: <4bcf5e1d-cd86-319a-889f-782755955e04@suse.cz>
Date: Tue, 5 Oct 2021 13:15:05 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: Peter Xu <peterx@...hat.com>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Hugh Dickins <hughd@...gle.com>,
Andrea Arcangeli <aarcange@...hat.com>,
Matthew Wilcox <willy@...radead.org>
Subject: Re: [PATCH 3/3] mm/smaps: Simplify shmem handling of pte holes
On 9/17/21 18:47, Peter Xu wrote:
> Firstly, the check_shmem_swap variable is not actually necessary, because it's
> always set together with the pte_hole hook; checking the hook directly would work.
Right...
> Meanwhile, the check within smaps_pte_entry is not easy to follow. E.g., the
> pte_none() check is not needed, as "!pte_present && !is_swap_pte" is the same thing.
Seems to be true, indeed.
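For reference, is_swap_pte() (quoting include/linux/swapops.h) is:

	static inline int is_swap_pte(pte_t pte)
	{
		return !pte_none(pte) && !pte_present(pte);
	}

so "!pte_present(pte) && !is_swap_pte(pte)" expands to
"!pte_present(pte) && (pte_none(pte) || pte_present(pte))", which leaves
exactly pte_none(pte).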
> While at it, use the pte_hole() helper rather than duplicating the page cache lookup.
pte_hole() is for checking a range, but we are calling it for a single page;
isn't that causing larger overhead in the end? There's the xarray involved, so
maybe Matthew will know best.
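To make that concrete, roughly what the two paths do for a single pte
(hand-simplified from fs/proc/task_mmu.c and mm/shmem.c, retry/resched
details trimmed, so take the exact shape with a grain of salt):

	/* old single-page lookup in smaps_pte_entry(): */
	page = xa_load(&vma->vm_file->f_mapping->i_pages,
		       linear_page_index(vma, addr));
	if (xa_is_value(page))
		mss->swap += PAGE_SIZE;

	/*
	 * new path: smaps_pte_hole() -> shmem_partial_swap_usage() for a
	 * one-page range, which sets up a full xarray iteration:
	 */
	XA_STATE(xas, &mapping->i_pages, index);

	rcu_read_lock();
	xas_for_each(&xas, page, index) {	/* last index == index */
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped++;
	}
	rcu_read_unlock();
	mss->swap += swapped << PAGE_SHIFT;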
> Still keep the CONFIG_SHMEM part so the code can be optimized to a nop for !SHMEM.
>
> There will be a very slight functional change in smaps_pte_entry(): for
> !SHMEM we'll return early for a pte_none pte (before checking page==NULL), but
> that's even nicer.
I don't think this is true: 'unlikely(IS_ENABLED(CONFIG_SHMEM))' will be a
compile-time constant false and short-circuit the rest of the 'if' evaluation,
so there will be no page check? Or have I misunderstood?
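(For reference, IS_ENABLED(CONFIG_SHMEM) expands to a literal 1 or 0 at
preprocessing time, so for !SHMEM the old condition

	} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
			    && pte_none(*pte))) {

folds to the equivalent of "else if (0)", and the compiler can eliminate the
branch body entirely.)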
> Cc: Hugh Dickins <hughd@...gle.com>
> Cc: Matthew Wilcox <willy@...radead.org>
> Cc: Vlastimil Babka <vbabka@...e.cz>
> Signed-off-by: Peter Xu <peterx@...hat.com>
> ---
> fs/proc/task_mmu.c | 22 ++++++++++++----------
> 1 file changed, 12 insertions(+), 10 deletions(-)
>
> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> index 2197f669e17b..ad667dbc96f5 100644
> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -397,7 +397,6 @@ struct mem_size_stats {
> u64 pss_shmem;
> u64 pss_locked;
> u64 swap_pss;
> - bool check_shmem_swap;
> };
>
> static void smaps_page_accumulate(struct mem_size_stats *mss,
> @@ -490,6 +489,16 @@ static int smaps_pte_hole(unsigned long addr, unsigned long end,
> #define smaps_pte_hole NULL
> #endif /* CONFIG_SHMEM */
>
> +static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
> +{
> +#ifdef CONFIG_SHMEM
> + if (walk->ops->pte_hole) {
> + /* depth is not used */
> + smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
> + }
> +#endif
> +}
> +
> static void smaps_pte_entry(pte_t *pte, unsigned long addr,
> struct mm_walk *walk)
> {
> @@ -518,12 +527,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
> }
> } else if (is_pfn_swap_entry(swpent))
> page = pfn_swap_entry_to_page(swpent);
> - } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
> - && pte_none(*pte))) {
> - page = xa_load(&vma->vm_file->f_mapping->i_pages,
> - linear_page_index(vma, addr));
> - if (xa_is_value(page))
> - mss->swap += PAGE_SIZE;
> + } else {
> + smaps_pte_hole_lookup(addr, walk);
> return;
> }
>
> @@ -737,8 +742,6 @@ static void smap_gather_stats(struct vm_area_struct *vma,
> return;
>
> #ifdef CONFIG_SHMEM
> - /* In case of smaps_rollup, reset the value from previous vma */
> - mss->check_shmem_swap = false;
> if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
> /*
> * For shared or readonly shmem mappings we know that all
> @@ -756,7 +759,6 @@ static void smap_gather_stats(struct vm_area_struct *vma,
> !(vma->vm_flags & VM_WRITE))) {
> mss->swap += shmem_swapped;
> } else {
> - mss->check_shmem_swap = true;
> ops = &smaps_shmem_walk_ops;
> }
> }
>