Message-ID: <bc4c0b66-17da-4037-9359-a1a36748b335@amd.com>
Date: Wed, 15 Jan 2025 15:37:15 +0530
From: Shivank Garg <shivankg@....com>
To: Byungchul Park <byungchul@...com>, linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Cc: kernel_team@...ynix.com, akpm@...ux-foundation.org, ziy@...dia.com
Subject: Re: [PATCH] mm: separate move/undo parts from migrate_pages_batch()
Hi Byungchul,
On 1/15/2025 3:23 PM, Byungchul Park wrote:
> Hi,
>
> This is a part of the luf (lazy unmap flush) patchset and has also been
> referred to several times by, e.g., Shivank Garg and Zi Yan, who are
> currently working on page migration optimization. Why don't we take
> this first so that such migration optimization attempts can use it?
>
> Byungchul
>
> --->8---
> From a65a6e4975962707bf87171e317f005c6276887e Mon Sep 17 00:00:00 2001
> From: Byungchul Park <byungchul@...com>
> Date: Thu, 8 Aug 2024 15:53:58 +0900
> Subject: [PATCH] mm: separate move/undo parts from migrate_pages_batch()
>
> Functionally, no change. This is a preparation for migration
> optimization work that requires separate folio lists for its own
> handling during migration. Refactor migrate_pages_batch() so as to
> split the move/undo parts out of migrate_pages_batch().
>
> Signed-off-by: Byungchul Park <byungchul@...com>
> ---
> mm/migrate.c | 134 +++++++++++++++++++++++++++++++--------------------
> 1 file changed, 83 insertions(+), 51 deletions(-)
[...]
> + */
> + switch(rc) {
^^^
Minor nit here - per kernel coding style, there should be a space after
'switch' before the parentheses. This style issue was present in the
original code and was carried over during the refactoring.

With this fixed, please add:

Reviewed-by: Shivank Garg <shivankg@....com>
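
For reference, the expected form per Documentation/process/coding-style.rst
is just:

	switch (rc) {

with everything else unchanged.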
> + case -EAGAIN:
> + *retry += 1;
> + *thp_retry += is_thp;
> + *nr_retry_pages += nr_pages;
> + break;
> + case MIGRATEPAGE_SUCCESS:
> + stats->nr_succeeded += nr_pages;
> + stats->nr_thp_succeeded += is_thp;
> + break;
> + default:
> + *nr_failed += 1;
> + stats->nr_thp_failed += is_thp;
> + stats->nr_failed_pages += nr_pages;
> + break;
> + }
> + dst = dst2;
> + dst2 = list_next_entry(dst, lru);
> + }
> +}
> +
> +static void migrate_folios_undo(struct list_head *src_folios,
> + struct list_head *dst_folios,
> + free_folio_t put_new_folio, unsigned long private,
> + struct list_head *ret_folios)
> +{
> + struct folio *folio, *folio2, *dst, *dst2;
> +
> + dst = list_first_entry(dst_folios, struct folio, lru);
> + dst2 = list_next_entry(dst, lru);
> + list_for_each_entry_safe(folio, folio2, src_folios, lru) {
> + int old_page_state = 0;
> + struct anon_vma *anon_vma = NULL;
> +
> + __migrate_folio_extract(dst, &old_page_state, &anon_vma);
> + migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
> + anon_vma, true, ret_folios);
> + list_del(&dst->lru);
> + migrate_folio_undo_dst(dst, true, put_new_folio, private);
> + dst = dst2;
> + dst2 = list_next_entry(dst, lru);
> + }
> +}
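
A side note for readers skimming the new helpers: both walk the source and
destination lists in lockstep, prefetching the next dst entry before the
loop body can unlink the current one - the manual counterpart of what
list_for_each_entry_safe() already does on the src side. A minimal sketch
of the pattern (src_list/dst_list are illustrative names, not from the
patch):

	struct folio *folio, *folio2, *dst, *dst2;

	dst = list_first_entry(dst_list, struct folio, lru);
	dst2 = list_next_entry(dst, lru);
	list_for_each_entry_safe(folio, folio2, src_list, lru) {
		/* ... operate on the (folio, dst) pair; may unlink dst ... */
		dst = dst2;
		dst2 = list_next_entry(dst, lru);
	}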
> +
> /*
> * migrate_pages_batch() first unmaps folios in the from list as many as
> * possible, then move the unmapped folios.
> @@ -1717,7 +1792,7 @@ static int migrate_pages_batch(struct list_head *from,
> int pass = 0;
> bool is_thp = false;
> bool is_large = false;
> - struct folio *folio, *folio2, *dst = NULL, *dst2;
> + struct folio *folio, *folio2, *dst = NULL;
> int rc, rc_saved = 0, nr_pages;
> LIST_HEAD(unmap_folios);
> LIST_HEAD(dst_folios);
> @@ -1888,42 +1963,11 @@ static int migrate_pages_batch(struct list_head *from,
> thp_retry = 0;
> nr_retry_pages = 0;
>
> - dst = list_first_entry(&dst_folios, struct folio, lru);
> - dst2 = list_next_entry(dst, lru);
> - list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
> - is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
> - nr_pages = folio_nr_pages(folio);
> -
> - cond_resched();
> -
> - rc = migrate_folio_move(put_new_folio, private,
> - folio, dst, mode,
> - reason, ret_folios);
> - /*
> - * The rules are:
> - * Success: folio will be freed
> - * -EAGAIN: stay on the unmap_folios list
> - * Other errno: put on ret_folios list
> - */
> - switch(rc) {
> - case -EAGAIN:
> - retry++;
> - thp_retry += is_thp;
> - nr_retry_pages += nr_pages;
> - break;
> - case MIGRATEPAGE_SUCCESS:
> - stats->nr_succeeded += nr_pages;
> - stats->nr_thp_succeeded += is_thp;
> - break;
> - default:
> - nr_failed++;
> - stats->nr_thp_failed += is_thp;
> - stats->nr_failed_pages += nr_pages;
> - break;
> - }
> - dst = dst2;
> - dst2 = list_next_entry(dst, lru);
> - }
> + /* Move the unmapped folios */
> + migrate_folios_move(&unmap_folios, &dst_folios,
> + put_new_folio, private, mode, reason,
> + ret_folios, stats, &retry, &thp_retry,
> + &nr_failed, &nr_retry_pages);
> }
> nr_failed += retry;
> stats->nr_thp_failed += thp_retry;
> @@ -1932,20 +1976,8 @@ static int migrate_pages_batch(struct list_head *from,
> rc = rc_saved ? : nr_failed;
> out:
> /* Cleanup remaining folios */
> - dst = list_first_entry(&dst_folios, struct folio, lru);
> - dst2 = list_next_entry(dst, lru);
> - list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
> - int old_page_state = 0;
> - struct anon_vma *anon_vma = NULL;
> -
> - __migrate_folio_extract(dst, &old_page_state, &anon_vma);
> - migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
> - anon_vma, true, ret_folios);
> - list_del(&dst->lru);
> - migrate_folio_undo_dst(dst, true, put_new_folio, private);
> - dst = dst2;
> - dst2 = list_next_entry(dst, lru);
> - }
> + migrate_folios_undo(&unmap_folios, &dst_folios,
> + put_new_folio, private, ret_folios);
>
> return rc;
> }
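
As an aside, the split makes the intended reuse easy to picture. A rough,
hypothetical sketch of another path in mm/migrate.c driving the helpers
with its own lists (illustrative only, not code from this series; the my_*
names and the surrounding locals such as stats, retry, etc. are assumed to
be set up as in migrate_pages_batch()):

	LIST_HEAD(my_unmap_folios);
	LIST_HEAD(my_dst_folios);
	LIST_HEAD(my_ret_folios);

	/* ... unmap folios, filling the two lists in lockstep ... */

	/* Move the unmapped folios to their destinations */
	migrate_folios_move(&my_unmap_folios, &my_dst_folios,
			    put_new_folio, private, mode, reason,
			    &my_ret_folios, &stats, &retry, &thp_retry,
			    &nr_failed, &nr_retry_pages);

	/* On failure or early bail-out, roll back what remains */
	migrate_folios_undo(&my_unmap_folios, &my_dst_folios,
			    put_new_folio, private, &my_ret_folios);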