Message-ID: <b04fabea-6216-3808-44e8-0a2125bf0230@nvidia.com>
Date: Wed, 16 Feb 2022 19:25:14 -0800
From: John Hubbard <jhubbard@...dia.com>
To: Peter Xu <peterx@...hat.com>, linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
"Kirill A . Shutemov" <kirill@...temov.name>,
Matthew Wilcox <willy@...radead.org>,
Yang Shi <shy828301@...il.com>,
Andrea Arcangeli <aarcange@...hat.com>,
Alistair Popple <apopple@...dia.com>,
David Hildenbrand <david@...hat.com>,
Vlastimil Babka <vbabka@...e.cz>,
Hugh Dickins <hughd@...gle.com>
Subject: Re: [PATCH v4 4/4] mm: Rework swap handling of zap_pte_range
On 2/16/22 1:48 AM, Peter Xu wrote:
> Clean the code up by merging the device private/exclusive swap entry handling
> with the rest, then merge the pte clear operation into the common path too.
Maybe also mention that you reduced the code duplication in the
is_device_private_entry() area, by letting it fall through to the common
pte_clear_not_present_full() at the end of the loop? Since you're listing
the other changes, that one seems worth mentioning.
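Condensed from the hunks below (just a sketch of the resulting shape,
not the exact code), the non-present path ends up with every entry
type falling through to the one clear at the bottom:

        if (is_device_private_entry(entry) ||
            is_device_exclusive_entry(entry)) {
                page = pfn_swap_entry_to_page(entry);
                if (unlikely(!should_zap_page(details, page)))
                        continue;
                rss[mm_counter(page)]--;
                if (is_device_private_entry(entry))
                        page_remove_rmap(page, false);
                put_page(page);
                /* no early continue anymore: falls through */
        } else if (!non_swap_entry(entry)) {
                /* genuine swap entry */
        } else if (is_migration_entry(entry)) {
                /* migration entry */
        } else {
                WARN_ON_ONCE(1);        /* unknown swap entry type */
        }
        /* now shared by every non-present entry type: */
        pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);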
>
> The "struct page *page" variable is defined in multiple places in the
> function; move it upward.
>
> free_swap_and_cache() is only useful for the !non_swap_entry() case, so move
> it into that branch.
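For reference, condensed from the hunk below, the genuine-swap branch
becomes:

        } else if (!non_swap_entry(entry)) {
                if (!should_zap_cows(details))
                        continue;
                rss[MM_SWAPENTS]--;
                if (unlikely(!free_swap_and_cache(entry)))
                        print_bad_pte(vma, addr, ptent, NULL);
        }

so the call is no longer issued for migration and other non-swap
entries, where free_swap_and_cache() would just bail out early anyway.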
>
> No functional change intended.
>
> Signed-off-by: Peter Xu <peterx@...hat.com>
> ---
> mm/memory.c | 21 ++++++---------------
> 1 file changed, 6 insertions(+), 15 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index ffa8c7dfe9ad..cade96024349 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -1361,6 +1361,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
> arch_enter_lazy_mmu_mode();
> do {
> pte_t ptent = *pte;
> + struct page *page;
> +
> if (pte_none(ptent))
> continue;
>
> @@ -1368,8 +1370,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
> break;
>
> if (pte_present(ptent)) {
> - struct page *page;
> -
> page = vm_normal_page(vma, addr, ptent);
> if (unlikely(!should_zap_page(details, page)))
> continue;
> @@ -1403,21 +1403,14 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
> entry = pte_to_swp_entry(ptent);
> if (is_device_private_entry(entry) ||
> is_device_exclusive_entry(entry)) {
> - struct page *page = pfn_swap_entry_to_page(entry);
> -
> + page = pfn_swap_entry_to_page(entry);
> if (unlikely(!should_zap_page(details, page)))
> continue;
> - pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
Yes! Good cleanup there.
> rss[mm_counter(page)]--;
> -
> if (is_device_private_entry(entry))
> page_remove_rmap(page, false);
> -
> put_page(page);
> - continue;
> - }
> -
> - if (!non_swap_entry(entry)) {
> + } else if (!non_swap_entry(entry)) {
> /*
> * If this is a genuine swap entry, then it must be an
> * private anon page. If the caller wants to skip
> @@ -1426,9 +1419,9 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
> if (!should_zap_cows(details))
> continue;
> rss[MM_SWAPENTS]--;
> + if (unlikely(!free_swap_and_cache(entry)))
> + print_bad_pte(vma, addr, ptent, NULL);
> } else if (is_migration_entry(entry)) {
> - struct page *page;
> -
> page = pfn_swap_entry_to_page(entry);
> if (!should_zap_page(details, page))
> continue;
> @@ -1441,8 +1434,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
> /* We should have covered all the swap entry types */
> WARN_ON_ONCE(1);
> }
> - if (unlikely(!free_swap_and_cache(entry)))
> - print_bad_pte(vma, addr, ptent, NULL);
> pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
> } while (pte++, addr += PAGE_SIZE, addr != end);
>
Reviewed-by: John Hubbard <jhubbard@...dia.com>
thanks,
--
John Hubbard
NVIDIA