Message-Id: <20230821204425.2940496-2-willy@infradead.org>
Date: Mon, 21 Aug 2023 21:44:20 +0100
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: Mateusz Guzik <mjguzik@...il.com>, linux-kernel@...r.kernel.org,
dennis@...nel.org, tj@...nel.org, cl@...ux.com,
akpm@...ux-foundation.org, shakeelb@...gle.com, linux-mm@...ck.org
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>
Subject: [PATCH 2/7] mm: Convert free_unref_page_list() to use folios

Most of its callees are not yet ready to accept a folio, but we know
all of the pages passed in are actually folios because they're linked
through ->lru.
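
For context, the callers of this path (release_pages(), for example)
batch folios on a private list linked through ->lru before handing the
list over.  A tail page's ->lru word overlaps compound_head, so only
head pages - that is, folios - can ever be linked this way.  A minimal
caller-side sketch, illustrative only and not part of this patch (the
function name is made up; assumes <linux/list.h> and <linux/mm.h>):

static void example_free_folio_batch(struct folio **folios, unsigned int nr)
{
	LIST_HEAD(pages_to_free);
	unsigned int i;

	/* Link each folio through ->lru; tail pages can never end up here. */
	for (i = 0; i < nr; i++)
		list_add(&folios[i]->lru, &pages_to_free);

	/* Every entry is a head page, so it may be treated as a folio. */
	free_unref_page_list(&pages_to_free);
}

Inside free_unref_page_list() the conversion therefore only needs to
pass &folio->page back down to the helpers that still take a struct page.
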
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
---
mm/page_alloc.c | 38 ++++++++++++++++++++------------------
1 file changed, 20 insertions(+), 18 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 81b1c7e3a28b..2f2185929fcb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2498,17 +2498,17 @@ void free_unref_page(struct page *page, unsigned int order)
 void free_unref_page_list(struct list_head *list)
 {
 	unsigned long __maybe_unused UP_flags;
-	struct page *page, *next;
+	struct folio *folio, *next;
 	struct per_cpu_pages *pcp = NULL;
 	struct zone *locked_zone = NULL;
 	int batch_count = 0;
 	int migratetype;
 
 	/* Prepare pages for freeing */
-	list_for_each_entry_safe(page, next, list, lru) {
-		unsigned long pfn = page_to_pfn(page);
-		if (!free_unref_page_prepare(page, pfn, 0)) {
-			list_del(&page->lru);
+	list_for_each_entry_safe(folio, next, list, lru) {
+		unsigned long pfn = folio_pfn(folio);
+		if (!free_unref_page_prepare(&folio->page, pfn, 0)) {
+			list_del(&folio->lru);
 			continue;
 		}
 
@@ -2516,24 +2516,25 @@ void free_unref_page_list(struct list_head *list)
 		 * Free isolated pages directly to the allocator, see
 		 * comment in free_unref_page.
 		 */
-		migratetype = get_pcppage_migratetype(page);
+		migratetype = get_pcppage_migratetype(&folio->page);
 		if (unlikely(is_migrate_isolate(migratetype))) {
-			list_del(&page->lru);
-			free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
+			list_del(&folio->lru);
+			free_one_page(folio_zone(folio), &folio->page, pfn,
+					0, migratetype, FPI_NONE);
 			continue;
 		}
 	}
 
-	list_for_each_entry_safe(page, next, list, lru) {
-		struct zone *zone = page_zone(page);
+	list_for_each_entry_safe(folio, next, list, lru) {
+		struct zone *zone = folio_zone(folio);
 
-		list_del(&page->lru);
-		migratetype = get_pcppage_migratetype(page);
+		list_del(&folio->lru);
+		migratetype = get_pcppage_migratetype(&folio->page);
 
 		/*
 		 * Either different zone requiring a different pcp lock or
 		 * excessive lock hold times when freeing a large list of
-		 * pages.
+		 * folios.
 		 */
 		if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) {
 			if (pcp) {
@@ -2544,15 +2545,16 @@ void free_unref_page_list(struct list_head *list)
 			batch_count = 0;
 
 			/*
-			 * trylock is necessary as pages may be getting freed
+			 * trylock is necessary as folios may be getting freed
 			 * from IRQ or SoftIRQ context after an IO completion.
 			 */
 			pcp_trylock_prepare(UP_flags);
 			pcp = pcp_spin_trylock(zone->per_cpu_pageset);
 			if (unlikely(!pcp)) {
 				pcp_trylock_finish(UP_flags);
-				free_one_page(zone, page, page_to_pfn(page),
-					      0, migratetype, FPI_NONE);
+				free_one_page(zone, &folio->page,
+						folio_pfn(folio), 0,
+						migratetype, FPI_NONE);
 				locked_zone = NULL;
 				continue;
 			}
@@ -2566,8 +2568,8 @@ void free_unref_page_list(struct list_head *list)
 		if (unlikely(migratetype >= MIGRATE_PCPTYPES))
 			migratetype = MIGRATE_MOVABLE;
 
-		trace_mm_page_free_batched(page);
-		free_unref_page_commit(zone, pcp, page, migratetype, 0);
+		trace_mm_page_free_batched(&folio->page);
+		free_unref_page_commit(zone, pcp, &folio->page, migratetype, 0);
 		batch_count++;
 	}
 
--
2.40.1