Message-ID: <20240805145940.2911011-6-kirill.shutemov@linux.intel.com>
Date: Mon, 5 Aug 2024 17:59:37 +0300
From: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
"Borislav Petkov (AMD)" <bp@...en8.de>,
Mel Gorman <mgorman@...e.de>,
Vlastimil Babka <vbabka@...e.cz>
Cc: Tom Lendacky <thomas.lendacky@....com>,
Mike Rapoport <rppt@...nel.org>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
David Hildenbrand <david@...hat.com>,
Johannes Weiner <hannes@...xchg.org>,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Subject: [PATCH 5/8] mm: Add a helper to accept page

Accept a given struct page and add it to the free list.

The helper is useful for physical memory scanners that want to use
free unaccepted memory.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
---
mm/internal.h | 8 ++++++++
mm/page_alloc.c | 53 +++++++++++++++++++++++++++++++++++--------------
2 files changed, 46 insertions(+), 15 deletions(-)
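
A minimal usage sketch, not part of the patch: it shows how a
hypothetical physical memory scanner might drive the new helper. The
scanner function scan_accept_pfn_range() and the pfn-range walk are
illustrative only; the only interface assumed from this patch is
accept_page(), which re-checks PageUnaccepted() under zone->lock and
accepts the whole MAX_PAGE_ORDER block.

#include <linux/mmzone.h>
#include <linux/page-flags.h>

#include "internal.h"

/* Hypothetical scanner: accept every unaccepted MAX_PAGE_ORDER block
 * in [start_pfn, end_pfn). */
static void scan_accept_pfn_range(unsigned long start_pfn,
				  unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += MAX_ORDER_NR_PAGES) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);

		/* Unlocked hint; accept_page() re-checks under zone->lock. */
		if (!PageUnaccepted(page))
			continue;

		/* Accepts the memory and frees the block to the allocator. */
		accept_page(page);
	}
}
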
diff --git a/mm/internal.h b/mm/internal.h
index b4d86436565b..d358535f8a7e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1578,4 +1578,12 @@ void unlink_file_vma_batch_init(struct unlink_vma_file_batch *);
void unlink_file_vma_batch_add(struct unlink_vma_file_batch *, struct vm_area_struct *);
void unlink_file_vma_batch_final(struct unlink_vma_file_batch *);

+#ifdef CONFIG_UNACCEPTED_MEMORY
+void accept_page(struct page *page);
+#else /* CONFIG_UNACCEPTED_MEMORY */
+static inline void accept_page(struct page *page)
+{
+}
+#endif /* CONFIG_UNACCEPTED_MEMORY */
+
#endif /* __MM_INTERNAL_H */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 34718852d576..5f33e92b0a55 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6953,11 +6953,46 @@ static void accept_page_memory(struct page *page, unsigned int order)
accept_memory(start, start + (PAGE_SIZE << order));
}

+static void __accept_page(struct zone *zone, unsigned long *flags,
+ struct page *page)
+{
+ bool last;
+
+ list_del(&page->lru);
+ last = list_empty(&zone->unaccepted_pages);
+
+ account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
+ __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
+ __ClearPageUnaccepted(page);
+ spin_unlock_irqrestore(&zone->lock, *flags);
+
+ accept_page_memory(page, MAX_PAGE_ORDER);
+
+ __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
+
+ if (last)
+ static_branch_dec(&zones_with_unaccepted_pages);
+}
+
+void accept_page(struct page *page)
+{
+ struct zone *zone = page_zone(page);
+ unsigned long flags;
+
+ spin_lock_irqsave(&zone->lock, flags);
+ if (!PageUnaccepted(page)) {
+ spin_unlock_irqrestore(&zone->lock, flags);
+ return;
+ }
+
+ /* Unlocks zone->lock */
+ __accept_page(zone, &flags, page);
+}
+
static bool try_to_accept_memory_one(struct zone *zone)
{
unsigned long flags;
struct page *page;
- bool last;

spin_lock_irqsave(&zone->lock, flags);
page = list_first_entry_or_null(&zone->unaccepted_pages,
@@ -6967,20 +7002,8 @@ static bool try_to_accept_memory_one(struct zone *zone)
return false;
}

- list_del(&page->lru);
- last = list_empty(&zone->unaccepted_pages);
-
- account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
- __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
- __ClearPageUnaccepted(page);
- spin_unlock_irqrestore(&zone->lock, flags);
-
- accept_page_memory(page, MAX_PAGE_ORDER);
-
- __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
-
- if (last)
- static_branch_dec(&zones_with_unaccepted_pages);
+ /* Unlocks zone->lock */
+ __accept_page(zone, &flags, page);

return true;
}
--
2.43.0