Message-ID: <20230802095346.87449-4-wangkefeng.wang@huawei.com>
Date: Wed, 2 Aug 2023 17:53:45 +0800
From: Kefeng Wang <wangkefeng.wang@...wei.com>
To: Andrew Morton <akpm@...ux-foundation.org>, <linux-mm@...ck.org>,
<linux-kernel@...r.kernel.org>
CC: Huang Ying <ying.huang@...el.com>,
David Hildenbrand <david@...hat.com>,
Matthew Wilcox <willy@...radead.org>,
Kefeng Wang <wangkefeng.wang@...wei.com>
Subject: [PATCH 3/4] mm: migrate: make migrate_misplaced_page() take a folio
Make migrate_misplaced_page() take a folio internally and use the folio
APIs to save several compound_head() calls. Note that page_mapcount()
is replaced by folio_estimated_sharers(), which only reads the mapcount
of the first subpage.
Signed-off-by: Kefeng Wang <wangkefeng.wang@...wei.com>
---
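A note for reviewers (kept below the scissors so it is not part of the
commit message): the saving comes from the fact that each page_*()
helper has to re-derive the head page via compound_head(), while a
folio is by definition already the head, so converting once with
page_folio() lets every later check skip that lookup. Below is a
minimal userspace sketch of that shape; the struct layout and the
dirty-flag helpers are made-up stand-ins that only model the real
machinery in include/linux/page-flags.h.

/* sketch.c - illustrative model only, not kernel code */
#include <stdio.h>

/*
 * Model: a compound page is a head page plus tail pages pointing at
 * it; a "folio" is just a page that is guaranteed to be the head.
 */
struct page {
        struct page *head;      /* NULL for a head page, else the head */
        unsigned long flags;
};
struct folio { struct page page; };

static struct page *compound_head(struct page *page)
{
        return page->head ? page->head : page;
}

static struct folio *page_folio(struct page *page)
{
        return (struct folio *)compound_head(page);
}

/* A page_*() helper must re-derive the head on every call... */
static int page_dirty(struct page *page)
{
        return compound_head(page)->flags & 0x1;
}

/* ...while a folio_*() helper starts at the head and skips that step. */
static int folio_dirty(struct folio *folio)
{
        return folio->page.flags & 0x1;
}

int main(void)
{
        struct page pages[4] = { [0] = { .flags = 0x1 } };
        for (int i = 1; i < 4; i++)
                pages[i].head = &pages[0];

        struct page *tail = &pages[3];
        struct folio *folio = page_folio(tail); /* one lookup up front */

        /*
         * Same answer either way, but every folio_*() call after the
         * single page_folio() avoids the compound_head() lookup that
         * each page_*() call repeats.
         */
        printf("page_dirty:  %d\n", page_dirty(tail));
        printf("folio_dirty: %d\n", folio_dirty(folio));
        return 0;
}

Build with e.g. "gcc -std=c99 sketch.c"; both lines print 1, but only
the page_*() path pays for the head lookup on each call.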
mm/migrate.c | 23 ++++++++++++-----------
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 39e0d35bccb3..4be61f944cac 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2525,17 +2525,18 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
                            int node)
 {
         pg_data_t *pgdat = NODE_DATA(node);
+        struct folio *folio = page_folio(page);
         int isolated;
         int nr_remaining;
         unsigned int nr_succeeded;
         LIST_HEAD(migratepages);
-        int nr_pages = thp_nr_pages(page);
+        int nr_pages = folio_nr_pages(folio);
 
         /*
          * Don't migrate file pages that are mapped in multiple processes
          * with execute permissions as they are probably shared libraries.
          */
-        if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
+        if (folio_estimated_sharers(folio) != 1 && folio_is_file_lru(folio) &&
             (vma->vm_flags & VM_EXEC))
                 goto out;
 
@@ -2543,29 +2544,29 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
          * Also do not migrate dirty pages as not all filesystems can move
          * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
          */
-        if (page_is_file_lru(page) && PageDirty(page))
+        if (folio_is_file_lru(folio) && folio_test_dirty(folio))
                 goto out;
 
-        isolated = numamigrate_isolate_folio(pgdat, page_folio(page));
+        isolated = numamigrate_isolate_folio(pgdat, folio);
         if (!isolated)
                 goto out;
 
-        list_add(&page->lru, &migratepages);
+        list_add(&folio->lru, &migratepages);
         nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
                                      NULL, node, MIGRATE_ASYNC,
                                      MR_NUMA_MISPLACED, &nr_succeeded);
         if (nr_remaining) {
                 if (!list_empty(&migratepages)) {
-                        list_del(&page->lru);
-                        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-                                        page_is_file_lru(page), -nr_pages);
-                        putback_lru_page(page);
+                        list_del(&folio->lru);
+                        node_stat_mod_folio(folio, NR_ISOLATED_ANON +
+                                        folio_is_file_lru(folio), -nr_pages);
+                        folio_putback_lru(folio);
                 }
                 isolated = 0;
         }
         if (nr_succeeded) {
                 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
-                if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
+                if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
                         mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
                                             nr_succeeded);
         }
@@ -2573,7 +2574,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
         return isolated;
 
 out:
-        put_page(page);
+        folio_put(folio);
         return 0;
 }
 #endif /* CONFIG_NUMA_BALANCING */
--
2.41.0