[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250129224157.2046079-24-fvdl@google.com>
Date: Wed, 29 Jan 2025 22:41:52 +0000
From: Frank van der Linden <fvdl@...gle.com>
To: akpm@...ux-foundation.org, muchun.song@...ux.dev, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Cc: yuzhao@...gle.com, usamaarif642@...il.com, joao.m.martins@...cle.com,
roman.gushchin@...ux.dev, Frank van der Linden <fvdl@...gle.com>
Subject: [PATCH v2 23/28] mm/cma: simplify zone intersection check
cma_activate_area() walks all pages in the area, checking
each page's zone individually to determine whether the area
spans more than one zone.
Make this more efficient by using the recently introduced
pfn_range_intersects_zones() function. Store the NUMA node
id (if any) in the cma structure to facilitate this.
Signed-off-by: Frank van der Linden <fvdl@...gle.com>
---
mm/cma.c | 13 ++++++-------
mm/cma.h | 2 ++
2 files changed, 8 insertions(+), 7 deletions(-)
diff --git a/mm/cma.c b/mm/cma.c
index 1704d5be6a07..6ad631c9fdca 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -103,7 +103,6 @@ static void __init cma_activate_area(struct cma *cma)
{
unsigned long pfn, base_pfn;
int allocrange, r;
- struct zone *zone;
struct cma_memrange *cmr;
for (allocrange = 0; allocrange < cma->nranges; allocrange++) {
@@ -124,12 +123,8 @@ static void __init cma_activate_area(struct cma *cma)
* CMA resv range to be in the same zone.
*/
WARN_ON_ONCE(!pfn_valid(base_pfn));
- zone = page_zone(pfn_to_page(base_pfn));
- for (pfn = base_pfn + 1; pfn < base_pfn + cmr->count; pfn++) {
- WARN_ON_ONCE(!pfn_valid(pfn));
- if (page_zone(pfn_to_page(pfn)) != zone)
- goto cleanup;
- }
+ if (pfn_range_intersects_zones(cma->nid, base_pfn, cmr->count))
+ goto cleanup;
for (pfn = base_pfn; pfn < base_pfn + cmr->count;
pfn += pageblock_nr_pages)
@@ -261,6 +256,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
cma->ranges[0].base_pfn = PFN_DOWN(base);
cma->ranges[0].count = cma->count;
cma->nranges = 1;
+ cma->nid = NUMA_NO_NODE;
*res_cma = cma;
@@ -497,6 +493,7 @@ int __init cma_declare_contiguous_multi(phys_addr_t total_size,
}
cma->nranges = nr;
+ cma->nid = nid;
*res_cma = cma;
out:
@@ -684,6 +681,8 @@ static int __init __cma_declare_contiguous_nid(phys_addr_t base,
if (ret)
memblock_phys_free(base, size);
+ (*res_cma)->nid = nid;
+
return ret;
}
diff --git a/mm/cma.h b/mm/cma.h
index 5f39dd1aac91..ff79dba5508c 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -50,6 +50,8 @@ struct cma {
struct cma_kobject *cma_kobj;
#endif
bool reserve_pages_on_error;
+ /* NUMA node (NUMA_NO_NODE if unspecified) */
+ int nid;
};
extern struct cma cma_areas[MAX_CMA_AREAS];
--
2.48.1.262.g85cc9f2d1e-goog
Powered by blists - more mailing lists