Message-ID: <20240809114854.3745464-7-kirill.shutemov@linux.intel.com>
Date: Fri, 9 Aug 2024 14:48:52 +0300
From: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
"Borislav Petkov (AMD)" <bp@...en8.de>,
Mel Gorman <mgorman@...e.de>,
Vlastimil Babka <vbabka@...e.cz>
Cc: Tom Lendacky <thomas.lendacky@....com>,
Mike Rapoport <rppt@...nel.org>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
David Hildenbrand <david@...hat.com>,
Johannes Weiner <hannes@...xchg.org>,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Subject: [PATCHv2 6/8] mm: Add a helper to accept page
Accept a given struct page and add it to the free list.
The helper is useful for physical memory scanners that want to use free
unaccepted memory.
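For illustration only (not part of this patch), a minimal sketch of how
a scanner might drive the helper; the function name scan_and_accept()
and the PFN range parameters are made up for this sketch:

	/*
	 * Hypothetical scanner (illustration only): walk a PFN range in
	 * MAX_ORDER-sized steps and accept any unaccepted blocks so they
	 * land on the free lists.
	 */
	static void scan_and_accept(unsigned long start_pfn,
				    unsigned long end_pfn)
	{
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn;
		     pfn += MAX_ORDER_NR_PAGES) {
			if (!pfn_valid(pfn))
				continue;

			/*
			 * accept_page() re-checks PageUnaccepted() under
			 * zone->lock and is a no-op for pages that are
			 * already accepted, so calling it unconditionally
			 * is safe even against concurrent acceptors.
			 */
			accept_page(pfn_to_page(pfn));
		}
	}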
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
Acked-by: David Hildenbrand <david@...hat.com>
---
mm/internal.h | 8 ++++++++
mm/page_alloc.c | 53 +++++++++++++++++++++++++++++++++++--------------
2 files changed, 46 insertions(+), 15 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 1159b04e76a3..ff47d57733ad 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1431,4 +1431,12 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long new_addr, unsigned long len,
bool need_rmap_locks, bool for_stack);
+#ifdef CONFIG_UNACCEPTED_MEMORY
+void accept_page(struct page *page);
+#else /* CONFIG_UNACCEPTED_MEMORY */
+static inline void accept_page(struct page *page)
+{
+}
+#endif /* CONFIG_UNACCEPTED_MEMORY */
+
#endif /* __MM_INTERNAL_H */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e7e304d9e739..f0610c691ae5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6980,11 +6980,46 @@ static bool page_contains_unaccepted(struct page *page, unsigned int order)
return range_contains_unaccepted_memory(start, PAGE_SIZE << order);
}
+static void __accept_page(struct zone *zone, unsigned long *flags,
+ struct page *page)
+{
+ bool last;
+
+ list_del(&page->lru);
+ last = list_empty(&zone->unaccepted_pages);
+
+ account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
+ __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
+ __ClearPageUnaccepted(page);
+ spin_unlock_irqrestore(&zone->lock, *flags);
+
+ accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER);
+
+ __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
+
+ if (last)
+ static_branch_dec(&zones_with_unaccepted_pages);
+}
+
+void accept_page(struct page *page)
+{
+ struct zone *zone = page_zone(page);
+ unsigned long flags;
+
+ spin_lock_irqsave(&zone->lock, flags);
+ if (!PageUnaccepted(page)) {
+ spin_unlock_irqrestore(&zone->lock, flags);
+ return;
+ }
+
+ /* Unlocks zone->lock */
+ __accept_page(zone, &flags, page);
+}
+
static bool try_to_accept_memory_one(struct zone *zone)
{
unsigned long flags;
struct page *page;
- bool last;
spin_lock_irqsave(&zone->lock, flags);
page = list_first_entry_or_null(&zone->unaccepted_pages,
@@ -6994,20 +7029,8 @@ static bool try_to_accept_memory_one(struct zone *zone)
return false;
}
- list_del(&page->lru);
- last = list_empty(&zone->unaccepted_pages);
-
- account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
- __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
- __ClearPageUnaccepted(page);
- spin_unlock_irqrestore(&zone->lock, flags);
-
- accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER);
-
- __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
-
- if (last)
- static_branch_dec(&zones_with_unaccepted_pages);
+ /* Unlocks zone->lock */
+ __accept_page(zone, &flags, page);
return true;
}
--
2.43.0