Message-ID: <63a8f509-6acb-48b5-1aa1-c278deaaa719@linux.alibaba.com>
Date: Mon, 7 Nov 2022 15:26:18 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: Huang Ying <ying.huang@...el.com>, linux-mm@...ck.org
Cc: linux-kernel@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>,
Zi Yan <ziy@...dia.com>, Yang Shi <shy828301@...il.com>,
Oscar Salvador <osalvador@...e.de>,
Matthew Wilcox <willy@...radead.org>
Subject: Re: [PATCH 1/2] migrate: convert unmap_and_move() to use folios
On 11/4/2022 4:30 PM, Huang Ying wrote:
> Quite straightforward: the page functions are converted to the
> corresponding folio functions. Same for the comments.
>
LGTM. Please feel free to add:
Reviewed-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
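
For readers following the conversion, here is a minimal sketch of the
page -> folio API mapping the patch applies. The helper names are taken
from the diff below; the wrapper function itself is hypothetical and not
part of the patch:

#include <linux/mm.h>

/*
 * Hypothetical example only: each page-based helper on the left-hand
 * side of the diff is replaced by its folio-based counterpart.
 */
static void example_drop_isolated(struct page *page)
{
	struct folio *folio = page_folio(page);	/* look up the owning folio */

	folio_clear_active(folio);	/* was ClearPageActive(page) */
	folio_clear_unevictable(folio);	/* was ClearPageUnevictable(page) */
	folio_put(folio);		/* was put_page(page) */
}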
> Signed-off-by: "Huang, Ying" <ying.huang@...el.com>
> Cc: Andrew Morton <akpm@...ux-foundation.org>
> Cc: Zi Yan <ziy@...dia.com>
> Cc: Yang Shi <shy828301@...il.com>
> Cc: Baolin Wang <baolin.wang@...ux.alibaba.com>
> Cc: Oscar Salvador <osalvador@...e.de>
> Cc: Matthew Wilcox <willy@...radead.org>
> ---
> mm/migrate.c | 54 ++++++++++++++++++++++++++--------------------------
> 1 file changed, 27 insertions(+), 27 deletions(-)
>
> diff --git a/mm/migrate.c b/mm/migrate.c
> index dff333593a8a..f6dd749dd2f8 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -1150,79 +1150,79 @@ static int __unmap_and_move(struct folio *src, struct folio *dst,
> }
>
> /*
> - * Obtain the lock on page, remove all ptes and migrate the page
> - * to the newly allocated page in newpage.
> + * Obtain the lock on folio, remove all ptes and migrate the folio
> + * to the newly allocated folio in dst.
> */
> static int unmap_and_move(new_page_t get_new_page,
> free_page_t put_new_page,
> - unsigned long private, struct page *page,
> + unsigned long private, struct folio *src,
> int force, enum migrate_mode mode,
> enum migrate_reason reason,
> struct list_head *ret)
> {
> - struct folio *dst, *src = page_folio(page);
> + struct folio *dst;
> int rc = MIGRATEPAGE_SUCCESS;
> struct page *newpage = NULL;
>
> - if (!thp_migration_supported() && PageTransHuge(page))
> + if (!thp_migration_supported() && folio_test_transhuge(src))
> return -ENOSYS;
>
> - if (page_count(page) == 1) {
> - /* Page was freed from under us. So we are done. */
> - ClearPageActive(page);
> - ClearPageUnevictable(page);
> + if (folio_ref_count(src) == 1) {
> + /* Folio was freed from under us. So we are done. */
> + folio_clear_active(src);
> + folio_clear_unevictable(src);
> /* free_pages_prepare() will clear PG_isolated. */
> goto out;
> }
>
> - newpage = get_new_page(page, private);
> + newpage = get_new_page(&src->page, private);
> if (!newpage)
> return -ENOMEM;
> dst = page_folio(newpage);
>
> - newpage->private = 0;
> + dst->private = 0;
> rc = __unmap_and_move(src, dst, force, mode);
> if (rc == MIGRATEPAGE_SUCCESS)
> - set_page_owner_migrate_reason(newpage, reason);
> + set_page_owner_migrate_reason(&dst->page, reason);
>
> out:
> if (rc != -EAGAIN) {
> /*
> - * A page that has been migrated has all references
> - * removed and will be freed. A page that has not been
> + * A folio that has been migrated has all references
> + * removed and will be freed. A folio that has not been
> * migrated will have kept its references and be restored.
> */
> - list_del(&page->lru);
> + list_del(&src->lru);
> }
>
> /*
> * If migration is successful, releases reference grabbed during
> - * isolation. Otherwise, restore the page to right list unless
> + * isolation. Otherwise, restore the folio to right list unless
> * we want to retry.
> */
> if (rc == MIGRATEPAGE_SUCCESS) {
> /*
> - * Compaction can migrate also non-LRU pages which are
> + * Compaction can migrate also non-LRU folios which are
> * not accounted to NR_ISOLATED_*. They can be recognized
> - * as __PageMovable
> + * as __folio_test_movable
> */
> - if (likely(!__PageMovable(page)))
> - mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
> - page_is_file_lru(page), -thp_nr_pages(page));
> + if (likely(!__folio_test_movable(src)))
> + mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
> + folio_is_file_lru(src), -folio_nr_pages(src));
>
> if (reason != MR_MEMORY_FAILURE)
> /*
> - * We release the page in page_handle_poison.
> + * We release the folio in page_handle_poison.
> */
> - put_page(page);
> + folio_put(src);
> } else {
> if (rc != -EAGAIN)
> - list_add_tail(&page->lru, ret);
> + list_add_tail(&src->lru, ret);
>
> if (put_new_page)
> - put_new_page(newpage, private);
> + put_new_page(&dst->page, private);
> else
> - put_page(newpage);
> + folio_put(dst);
> }
>
> return rc;
> @@ -1459,7 +1459,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
> &ret_pages);
> else
> rc = unmap_and_move(get_new_page, put_new_page,
> - private, page, pass > 2, mode,
> + private, page_folio(page), pass > 2, mode,
> reason, &ret_pages);
> /*
> * The rules are: