Message-ID: <20250915195153.462039-6-fvdl@google.com>
Date: Mon, 15 Sep 2025 19:51:46 +0000
From: Frank van der Linden <fvdl@...gle.com>
To: akpm@...ux-foundation.org, muchun.song@...ux.dev, linux-mm@...ck.org,
	linux-kernel@...r.kernel.org
Cc: hannes@...xchg.org, david@...hat.com, roman.gushchin@...ux.dev,
	Frank van der Linden <fvdl@...gle.com>
Subject: [RFC PATCH 05/12] mm/cma: add helper functions for CMA balancing

Add some CMA helper functions to assist CMA balancing. They are:

cma_get_available
 - Returns the number of available pages in a CMA area.

cma_numranges
 - Returns the total number of CMA ranges.

cma_next_balance_pagerange
 - Gets the next CMA page range in a zone that is available as a
   target for CMA balancing. This means a range consisting of CMA
   pageblocks that are managed by the buddy allocator (not allocated
   through cma_alloc). The array of CMA ranges is walked top down.

cma_next_noncma_pagerange
 - Gets the next non-CMA page range in a zone. The zone is
   traversed bottom up.
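
For illustration, a minimal, hypothetical usage sketch of the two
range walkers follows. It is not part of this patch (the actual
consumers of these helpers are not in this patch), the function
names are made up, and the sketch only assumes the calling
convention visible in the code below: *rindex set to -1 and
*end_pfn (and *start_pfn) set to 0 before the first call.

#include <linux/cma.h>
#include <linux/mmzone.h>

/* Hypothetical: visit free (buddy-managed) CMA ranges in @zone, top down. */
static void example_walk_cma_balance_ranges(struct zone *zone)
{
	unsigned long start_pfn = 0, end_pfn = 0;
	int rindex = -1;

	/* A NULL cma argument walks the global array of CMA ranges. */
	while (cma_next_balance_pagerange(zone, NULL, &rindex,
					  &start_pfn, &end_pfn)) {
		/* [start_pfn, end_pfn) is a snapshot of free CMA pages. */
	}
}

/* Hypothetical: visit the non-CMA stretches of @zone, bottom up. */
static void example_walk_noncma_ranges(struct zone *zone)
{
	unsigned long start_pfn = 0, end_pfn = 0;
	int rindex = -1;

	while (cma_next_noncma_pagerange(zone, &rindex, &start_pfn, &end_pfn)) {
		/* [start_pfn, end_pfn) contains no MIGRATE_CMA pageblocks. */
	}
}
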
Signed-off-by: Frank van der Linden <fvdl@...gle.com>
---
 include/linux/cma.h |  30 +++++++++
 mm/cma.c            | 161 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 191 insertions(+)

diff --git a/include/linux/cma.h b/include/linux/cma.h
index ec48f2a11f1d..0504580d61d0 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -37,6 +37,7 @@ enum cma_flags {
 #define CMA_INIT_FLAGS (CMA_FIXED|CMA_RESERVE_PAGES_ON_ERROR)
 
 struct cma;
+struct zone;
 
 extern unsigned long totalcma_pages;
 extern phys_addr_t cma_get_base(const struct cma *cma);
@@ -79,6 +80,12 @@ extern void cma_reserve_pages_on_error(struct cma *cma);
 struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
 bool cma_free_folio(struct cma *cma, const struct folio *folio);
 bool cma_validate_zones(struct cma *cma);
+int cma_numranges(void);
+unsigned long cma_get_available(const struct cma *cma);
+bool cma_next_balance_pagerange(struct zone *zone, struct cma *cma, int *rindex,
+				unsigned long *startpfn, unsigned long *endpfn);
+bool cma_next_noncma_pagerange(struct zone *zone, int *rindex,
+			       unsigned long *startpfn, unsigned long *endpfn);
 #else
 static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
 {
@@ -93,6 +100,29 @@ static inline bool cma_validate_zones(struct cma *cma)
 {
 	return false;
 }
+
+static inline int cma_numranges(void)
+{
+	return 0;
+}
+
+static inline unsigned long cma_get_available(const struct cma *cma)
+{
+	return 0;
+}
+
+static inline bool cma_next_balance_pagerange(struct zone *zone,
+		struct cma *cma, int *rindex, unsigned long *start_pfn,
+		unsigned long *end_pfn)
+{
+	return false;
+}
+
+static inline bool cma_next_noncma_pagerange(struct zone *zone, int *rindex,
+		unsigned long *start_pfn, unsigned long *end_pfn)
+{
+	return false;
+}
 #endif
 
 #endif
diff --git a/mm/cma.c b/mm/cma.c
index 1f5a7bfc9152..53cb1833407b 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -54,6 +54,11 @@ const char *cma_get_name(const struct cma *cma)
 	return cma->name;
 }
 
+unsigned long cma_get_available(const struct cma *cma)
+{
+	return cma->available_count;
+}
+
 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
 					     unsigned int align_order)
 {
@@ -202,6 +207,11 @@ static void __init cma_activate_area(struct cma *cma)
 static struct cma_memrange **cma_ranges;
 static int cma_nranges;
 
+int cma_numranges(void)
+{
+	return cma_nranges;
+}
+
 static int cmprange(const void *a, const void *b)
 {
 	struct cma_memrange *r1, *r2;
@@ -214,6 +224,157 @@ static int cmprange(const void *a, const void *b)
 	return r1->base_pfn - r2->base_pfn;
 }
 
+/*
+ * Provide the next free range in a cma memory range, as derived
+ * from the bitmap.
+ *
+ * @cmr: memory range to scan
+ * @start_pfn: the beginning of the previous range
+ * @end_pfn: the end of the previous range, zero for the first call
+ *
+ * The caller can adjust *end_pfn and use it as a starting point.
+ */
+static bool cma_next_free_range(struct cma_memrange *cmr,
+				unsigned long *start_pfn, unsigned long *end_pfn)
+{
+	unsigned long zerobit, onebit, start, nbits, offset, base;
+	struct cma *cma = cmr->cma;
+
+	nbits = cma_bitmap_maxno(cmr);
+
+	if (!*end_pfn)
+		offset = start = 0;
+	else {
+		start = ((*end_pfn - cmr->base_pfn) >> cma->order_per_bit);
+		if (start >= nbits)
+			return false;
+
+		offset = *end_pfn -
+			 (cmr->base_pfn + (start << cma->order_per_bit));
+	}
+
+	spin_lock_irq(&cma->lock);
+	zerobit = find_next_zero_bit(cmr->bitmap, nbits, start);
+	if (zerobit >= nbits) {
+		spin_unlock_irq(&cma->lock);
+		return false;
+	}
+	onebit = find_next_bit(cmr->bitmap, nbits, zerobit);
+	spin_unlock_irq(&cma->lock);
+
+	base = (zerobit << cma->order_per_bit) + cmr->base_pfn;
+	*start_pfn = base + offset;
+	*end_pfn = base + ((onebit - zerobit) << cma->order_per_bit);
+
+	return true;
+}
+
+static inline bool cma_should_balance_range(struct zone *zone,
+					    struct cma_memrange *cmr)
+{
+	if (page_zone(pfn_to_page(cmr->base_pfn)) != zone)
+		return false;
+
+	return true;
+}
+
+/*
+ * Get the next CMA page range containing pages that have not been
+ * allocated through cma_alloc. This is just a snapshot, and the caller
+ * is expected to deal with the changing circumstances. Used to walk
+ * through CMA pageblocks in a zone in an optimized fashion during
+ * zone CMA balance compaction.
+ *
+ * If @cma is NULL, the global list of ranges is walked, else
+ * the ranges of the area pointed to by @cma are walked.
+ */
+bool cma_next_balance_pagerange(struct zone *zone, struct cma *cma,
+				int *rindex, unsigned long *start_pfn,
+				unsigned long *end_pfn)
+{
+	struct cma_memrange *cmr;
+	int i, nranges;
+
+	if (!cma_nranges)
+		return false;
+
+	nranges = cma ? cma->nranges : cma_nranges;
+
+	if (*rindex == -1) {
+		if (*end_pfn != 0) {
+			for (i = nranges - 1; i >= 0; i--) {
+				cmr = cma ? &cma->ranges[i] : cma_ranges[i];
+				if (!cma_should_balance_range(zone, cmr))
+					continue;
+				if (*end_pfn > cmr->base_pfn &&
+				    *end_pfn < (cmr->base_pfn + cmr->count))
+					break;
+			}
+		} else {
+			i = nranges - 1;
+		}
+	} else {
+		i = *rindex;
+	}
+
+	for (; i >= 0; i--) {
+		cmr = cma ? &cma->ranges[i] : cma_ranges[i];
+		if (!cma_should_balance_range(zone, cmr))
+			continue;
+		if (cma_next_free_range(cmr, start_pfn, end_pfn)) {
+			*rindex = i;
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/*
+ * Get the next stretch of memory in a zone that does not consist of
+ * MIGRATE_CMA pageblocks.
+ */
+bool cma_next_noncma_pagerange(struct zone *zone, int *rindex,
+			       unsigned long *start_pfn,
+			       unsigned long *end_pfn)
+{
+	struct cma_memrange *cmr;
+	unsigned long cma_start, cma_end;
+	int i;
+
+	if (*end_pfn >= zone_end_pfn(zone))
+		return false;
+
+	if (*rindex == -1) {
+		*rindex = 0;
+		if (*start_pfn == 0)
+			*start_pfn = zone->zone_start_pfn;
+	} else {
+		cmr = cma_ranges[*rindex];
+		*start_pfn = cmr->base_pfn + cmr->count;
+	}
+
+	for (i = *rindex; i < cma_nranges; i++) {
+		cmr = cma_ranges[i];
+		cma_start = cmr->base_pfn;
+		cma_end = cmr->base_pfn + cmr->count;
+		if (page_zone(pfn_to_page(cma_start)) != zone)
+			continue;
+		if (*start_pfn == cma_start) {
+			*start_pfn = cma_end;
+		} else if (*start_pfn < cma_start) {
+			*rindex = i;
+			*end_pfn = cma_start;
+			return true;
+		}
+	}
+
+	*rindex = cma_nranges;
+	*end_pfn = zone_end_pfn(zone);
+
+	return true;
+}
+
 static int __init cma_init_reserved_areas(void)
 {
 	int i, r, nranges;
--
2.51.0.384.g4c02a37b29-goog