Message-Id: <20250310-dmem-cgroups-v1-1-2984c1bc9312@kernel.org>
Date: Mon, 10 Mar 2025 13:06:07 +0100
From: Maxime Ripard <mripard@...nel.org>
To: Andrew Morton <akpm@...ux-foundation.org>,
Marek Szyprowski <m.szyprowski@...sung.com>,
Robin Murphy <robin.murphy@....com>, Sumit Semwal <sumit.semwal@...aro.org>,
Christian König <christian.koenig@....com>,
Benjamin Gaignard <benjamin.gaignard@...labora.com>,
Brian Starkey <Brian.Starkey@....com>, John Stultz <jstultz@...gle.com>,
"T.J. Mercier" <tjmercier@...gle.com>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Thomas Zimmermann <tzimmermann@...e.de>, David Airlie <airlied@...il.com>,
Simona Vetter <simona@...ll.ch>, Tomasz Figa <tfiga@...omium.org>,
Mauro Carvalho Chehab <mchehab@...nel.org>
Cc: Hans Verkuil <hverkuil@...all.nl>,
Laurent Pinchart <laurent.pinchart+renesas@...asonboard.com>,
linux-mm@...ck.org, linux-kernel@...r.kernel.org, iommu@...ts.linux.dev,
linux-media@...r.kernel.org, dri-devel@...ts.freedesktop.org,
linaro-mm-sig@...ts.linaro.org, Maxime Ripard <mripard@...nel.org>
Subject: [PATCH RFC 01/12] cma: Register dmem region for each cma region
Now that the dmem cgroup has been merged, we need to create a memory
region for each allocator that devices might allocate DMA memory from.
Since CMA is one of those allocators, we need to register such a region.
CMA can manage multiple regions though, so we need to create one dmem
region per CMA region.
Signed-off-by: Maxime Ripard <mripard@...nel.org>
---
mm/cma.c | 14 +++++++++++++-
mm/cma.h | 3 +++
2 files changed, 16 insertions(+), 1 deletion(-)
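Note: the registration below only creates the accounting region; nothing
gets charged to it until the allocation paths call into the dmem charge
helpers, which the rest of the series presumably wires up. As a minimal
sketch of what that pairing could look like in mm/cma.c, assuming the
dmem_cgroup_try_charge()/dmem_cgroup_uncharge() helpers exported by
<linux/cgroup_dmem.h>; cma_charge_dmem() and cma_uncharge_dmem() are
hypothetical names and are not part of this patch:

/*
 * Hypothetical wrappers (not part of this patch) sketching how an
 * allocation of 'count' pages could be charged against the dmem
 * region registered in cma_activate_area().
 */
#ifdef CONFIG_CGROUP_DMEM
static int cma_charge_dmem(struct cma *cma, unsigned long count,
			   struct dmem_cgroup_pool_state **pool)
{
	/* Charge the current task's dmem cgroup for 'count' pages. */
	return dmem_cgroup_try_charge(cma->dmem_cgrp_region,
				      (u64)count << PAGE_SHIFT,
				      pool, NULL);
}

static void cma_uncharge_dmem(struct dmem_cgroup_pool_state *pool,
			      unsigned long count)
{
	/* Drop the charge taken at allocation time. */
	dmem_cgroup_uncharge(pool, (u64)count << PAGE_SHIFT);
}
#endif

With charge sites like these in place, a "cma/<name>" limit configured
through the dmem cgroup interface would bound how much a given cgroup
can allocate from that CMA region.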
diff --git a/mm/cma.c b/mm/cma.c
index de5bc0c81fc232bf82cd7ef22f6097059ab605e2..41a9ae907dcf69a73e963830d2c5f589dfc44f22 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -21,10 +21,11 @@
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
+#include <linux/cgroup_dmem.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>
@@ -89,16 +90,25 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
spin_unlock_irqrestore(&cma->lock, flags);
}
static void __init cma_activate_area(struct cma *cma)
{
+ struct dmem_cgroup_region *region;
unsigned long base_pfn = cma->base_pfn, pfn;
struct zone *zone;
+ region = dmem_cgroup_register_region(cma_get_size(cma), "cma/%s", cma->name);
+ if (IS_ERR(region))
+ goto out_error;
+
+#ifdef CONFIG_CGROUP_DMEM
+ cma->dmem_cgrp_region = region;
+#endif
+
cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
if (!cma->bitmap)
- goto out_error;
+ goto unreg_dmem;
/*
* alloc_contig_range() requires the pfn range specified to be in the
* same zone. Simplify by forcing the entire CMA resv range to be in the
* same zone.
@@ -124,10 +134,12 @@ static void __init cma_activate_area(struct cma *cma)
return;
not_in_zone:
bitmap_free(cma->bitmap);
+unreg_dmem:
+ dmem_cgroup_unregister_region(region);
out_error:
/* Expose all pages to the buddy, they are useless for CMA. */
if (!cma->reserve_pages_on_error) {
for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
free_reserved_page(pfn_to_page(pfn));
diff --git a/mm/cma.h b/mm/cma.h
index 8485ef893e99d8da5ee41eb03194b5b00ff088ba..e05d3eb7c173f3fe75ad7808968925c77d190c80 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -29,10 +29,13 @@ struct cma {
atomic64_t nr_pages_failed;
/* the number of CMA page released */
atomic64_t nr_pages_released;
/* kobject requires dynamic object */
struct cma_kobject *cma_kobj;
+#endif
+#ifdef CONFIG_CGROUP_DMEM
+ struct dmem_cgroup_region *dmem_cgrp_region;
#endif
bool reserve_pages_on_error;
};
extern struct cma cma_areas[MAX_CMA_AREAS];
--
2.48.1