Message-ID: <CAF8kJuNsSeVEhcGz9idUahzR761w_OZV+dNY73ocMfQW=a1iOw@mail.gmail.com>
Date: Wed, 23 Aug 2023 06:16:13 -0700
From: Chris Li <chrisl@...nel.org>
To: David Hildenbrand <david@...hat.com>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
linux-arm-kernel@...ts.infradead.org,
Andrew Morton <akpm@...ux-foundation.org>,
Matthew Wilcox <willy@...radead.org>,
Peter Xu <peterx@...hat.com>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>, Hugh Dickins <hughd@...gle.com>,
Seth Jennings <sjenning@...hat.com>,
Dan Streetman <ddstreet@...e.org>,
Vitaly Wool <vitaly.wool@...sulko.com>
Subject: Re: [PATCH mm-unstable v1 4/4] mm/huge_memory: work on folio->swap instead of page->private when splitting folio

Hi David,

Reviewed-by: Chris Li <chrisl@...nel.org>

Chris

On Mon, Aug 21, 2023 at 9:09 AM David Hildenbrand <david@...hat.com> wrote:
>
> Let's work on folio->swap instead. While at it, use folio_test_anon() and
> folio_test_swapcache() -- the original folio remains valid even after
> splitting (but is then an order-0 folio).
>
> We can probably convert a lot more to folios in that code, let's focus
> on folio->swap handling only for now.
>
> Signed-off-by: David Hildenbrand <david@...hat.com>
> ---
> mm/huge_memory.c | 29 +++++++++++++++--------------
> 1 file changed, 15 insertions(+), 14 deletions(-)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index c04702ae71d2..4465915711c3 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2401,10 +2401,16 @@ static void lru_add_page_tail(struct page *head, struct page *tail,
> }
> }
>
> -static void __split_huge_page_tail(struct page *head, int tail,
> +static void __split_huge_page_tail(struct folio *folio, int tail,
> struct lruvec *lruvec, struct list_head *list)
> {
> + struct page *head = &folio->page;
> struct page *page_tail = head + tail;
> + /*
> + * Careful: new_folio is not a "real" folio before we cleared PageTail.
> + * Don't pass it around before clear_compound_head().
> + */
> + struct folio *new_folio = (struct folio *)page_tail;
>
> VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
>
> @@ -2453,8 +2459,8 @@ static void __split_huge_page_tail(struct page *head, int tail,
> VM_WARN_ON_ONCE_PAGE(true, page_tail);
> page_tail->private = 0;
> }
> - if (PageSwapCache(head))
> - set_page_private(page_tail, (unsigned long)head->private + tail);
> + if (folio_test_swapcache(folio))
> + new_folio->swap.val = folio->swap.val + tail;
>
> /* Page flags must be visible before we make the page non-compound. */
> smp_wmb();
> @@ -2500,11 +2506,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
> /* complete memcg works before add pages to LRU */
> split_page_memcg(head, nr);
>
> - if (PageAnon(head) && PageSwapCache(head)) {
> - swp_entry_t entry = { .val = page_private(head) };
> -
> - offset = swp_offset(entry);
> - swap_cache = swap_address_space(entry);
> + if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
> + offset = swp_offset(folio->swap);
> + swap_cache = swap_address_space(folio->swap);
> xa_lock(&swap_cache->i_pages);
> }
>
> @@ -2514,7 +2518,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
> ClearPageHasHWPoisoned(head);
>
> for (i = nr - 1; i >= 1; i--) {
> - __split_huge_page_tail(head, i, lruvec, list);
> + __split_huge_page_tail(folio, i, lruvec, list);
> /* Some pages can be beyond EOF: drop them from page cache */
> if (head[i].index >= end) {
> struct folio *tail = page_folio(head + i);
> @@ -2559,11 +2563,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
>
> remap_page(folio, nr);
>
> - if (PageSwapCache(head)) {
> - swp_entry_t entry = { .val = page_private(head) };
> -
> - split_swap_cluster(entry);
> - }
> + if (folio_test_swapcache(folio))
> + split_swap_cluster(folio->swap);
>
> for (i = 0; i < nr; i++) {
> struct page *subpage = head + i;
> --
> 2.41.0
>
>
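
For anyone following along without the rest of the series at hand, here is a
minimal sketch (my illustration, not code from the patch) of the access
pattern being replaced versus the one being introduced. The helper names
old_swap_mapping()/new_swap_mapping() are made up for the example; the rest
is existing kernel API, assuming an anonymous folio that sits in the swap
cache:

#include <linux/mm.h>	/* page_private(), struct page/folio */
#include <linux/swap.h>	/* swp_entry_t, swap_address_space() */

/* Old way: rebuild the swp_entry_t from the raw page->private word. */
static struct address_space *old_swap_mapping(struct page *head)
{
	swp_entry_t entry = { .val = page_private(head) };

	return swap_address_space(entry);
}

/* New way: folio->swap already is a typed swp_entry_t, no rebuild needed. */
static struct address_space *new_swap_mapping(struct folio *folio)
{
	return swap_address_space(folio->swap);
}

That is essentially the same simplification the patch applies in three
places: the tail-page setup in __split_huge_page_tail(), the
swap_cache/offset lookup in __split_huge_page(), and the
split_swap_cluster() call.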