Message-Id: <20230319220008.2138576-3-rppt@kernel.org>
Date: Sun, 19 Mar 2023 23:59:55 +0200
From: Mike Rapoport <rppt@...nel.org>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: David Hildenbrand <david@...hat.com>,
Matthew Wilcox <willy@...radead.org>,
Mel Gorman <mgorman@...e.de>, Michal Hocko <mhocko@...nel.org>,
Mike Rapoport <rppt@...nel.org>,
Thomas Bogendoerfer <tsbogend@...ha.franken.de>,
Vlastimil Babka <vbabka@...e.cz>, linux-kernel@...r.kernel.org,
linux-mips@...r.kernel.org, linux-mm@...ck.org
Subject: [PATCH 02/15] mm/cma: move init_cma_reserved_pageblock() to cma.c and make it static

From: "Mike Rapoport (IBM)" <rppt@...nel.org>

init_cma_reserved_pageblock() is only used in cma.c; there is no point
in having it in page_alloc.c.

Move init_cma_reserved_pageblock() to cma.c and make it static.

Signed-off-by: Mike Rapoport (IBM) <rppt@...nel.org>
---
 include/linux/gfp.h |  5 -----
 mm/cma.c            | 21 +++++++++++++++++++++
 mm/page_alloc.c     | 21 ---------------------
 3 files changed, 21 insertions(+), 26 deletions(-)
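
For context (not part of this diff): after the move, the only caller
left is cma_activate_area() in the same file, which hands each
pageblock of a reserved area over to the buddy allocator. A rough
sketch of that call site, assuming the loop shape of mainline cma.c at
the time:

	/*
	 * Assumed caller: walk the CMA area one pageblock at a time;
	 * each block is marked MIGRATE_CMA and freed to the buddy
	 * allocator by init_cma_reserved_pageblock().
	 */
	for (pfn = base_pfn; pfn < base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));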
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 65a78773dcca..7c554e4bd49f 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -361,9 +361,4 @@ extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
 #endif
 void free_contig_range(unsigned long pfn, unsigned long nr_pages);
 
-#ifdef CONFIG_CMA
-/* CMA stuff */
-extern void init_cma_reserved_pageblock(struct page *page);
-#endif
-
 #endif /* __LINUX_GFP_H */
diff --git a/mm/cma.c b/mm/cma.c
index a7263aa02c92..ce08fb9825b4 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -31,8 +31,10 @@
 #include <linux/highmem.h>
 #include <linux/io.h>
 #include <linux/kmemleak.h>
+#include <linux/page-isolation.h>
 #include <trace/events/cma.h>
 
+#include "internal.h"
 #include "cma.h"
 
 struct cma cma_areas[MAX_CMA_AREAS];
@@ -93,6 +95,25 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
 	spin_unlock_irqrestore(&cma->lock, flags);
 }
 
+/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
+static void init_cma_reserved_pageblock(struct page *page)
+{
+	unsigned i = pageblock_nr_pages;
+	struct page *p = page;
+
+	do {
+		__ClearPageReserved(p);
+		set_page_count(p, 0);
+	} while (++p, --i);
+
+	set_pageblock_migratetype(page, MIGRATE_CMA);
+	set_page_refcounted(page);
+	__free_pages(page, pageblock_order);
+
+	adjust_managed_page_count(page, pageblock_nr_pages);
+	page_zone(page)->cma_pages += pageblock_nr_pages;
+}
+
 static void __init cma_activate_area(struct cma *cma)
 {
 	unsigned long base_pfn = cma->base_pfn, pfn;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 87d760236dba..22e3da842e3f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2280,27 +2280,6 @@ void __init page_alloc_init_late(void)
 		set_zone_contiguous(zone);
 }
 
-#ifdef CONFIG_CMA
-/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
-void __init init_cma_reserved_pageblock(struct page *page)
-{
-	unsigned i = pageblock_nr_pages;
-	struct page *p = page;
-
-	do {
-		__ClearPageReserved(p);
-		set_page_count(p, 0);
-	} while (++p, --i);
-
-	set_pageblock_migratetype(page, MIGRATE_CMA);
-	set_page_refcounted(page);
-	__free_pages(page, pageblock_order);
-
-	adjust_managed_page_count(page, pageblock_nr_pages);
-	page_zone(page)->cma_pages += pageblock_nr_pages;
-}
-#endif
-
 /*
  * The order of subdivision here is critical for the IO subsystem.
  * Please do not alter this order without good reasons and regression
--
2.35.1