[<prev] [next>] [day] [month] [year] [list]
Message-Id: <20260130-dmabuf-heap-cma-dmem-v1-1-3647ea993e99@redhat.com>
Date: Fri, 30 Jan 2026 17:55:30 -0500
From: Eric Chanudet <echanude@...hat.com>
To: Sumit Semwal <sumit.semwal@...aro.org>,
Benjamin Gaignard <benjamin.gaignard@...labora.com>,
Brian Starkey <Brian.Starkey@....com>, John Stultz <jstultz@...gle.com>,
"T.J. Mercier" <tjmercier@...gle.com>,
Christian König <christian.koenig@....com>
Cc: linux-media@...r.kernel.org, dri-devel@...ts.freedesktop.org,
linaro-mm-sig@...ts.linaro.org, linux-kernel@...r.kernel.org,
Maxime Ripard <mripard@...hat.com>, Albert Esteve <aesteve@...hat.com>,
Eric Chanudet <echanude@...hat.com>
Subject: [PATCH] dma-buf: heaps: cma: register a dmem region for each cma
heap
The cma dma-buf heaps let userspace allocate buffers in CMA regions
without enforcing limits. Register a dmem region per cma heap and charge
against it when allocating a buffer in a cma heap.
For the default cma region, two heaps may be created for the same cma
range:
commit 854acbe75ff4 ("dma-buf: heaps: Give default CMA heap a fixed name")
introduced /dev/dma_heap/default_cma_region.
commit 4f5f8baf7341 ("dma-buf: heaps: cma: Create CMA heap for each CMA
reserved region")
created a CMA heap for each CMA region, which might create a duplicate
heap to the default one, e.g.:
/dev/dma_heap/default_cma_region
/dev/dma_heap/reserved
Removing the legacy heap would break user API. So handle the special
case by using one dmem between the two heaps to account charges
correctly.
Signed-off-by: Eric Chanudet <echanude@...hat.com>
---
Following the introduction of cgroup accounting for the system heap[1],
this behavior is controlled by the dma_heap.mem_accounting parameter and
is disabled by default.
dmem is chosen for CMA heaps as it allows limits to be set for each
region backing each heap. There is one caveat for the default cma range,
which may be accessible through two different cma heaps; that is treated
as a special case.
[1] https://lore.kernel.org/all/20260116-dmabuf-heap-system-memcg-v3-0-ecc6b62cc446@redhat.com/
---
drivers/dma-buf/heaps/cma_heap.c | 51 ++++++++++++++++++++++++++++++++++++----
1 file changed, 46 insertions(+), 5 deletions(-)
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 49cc45fb42dd7200c3c14384bcfdbe85323454b1..608af8ad6bce7fe0321da6d8f1b65a69f5d8d950 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -27,6 +27,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/cgroup_dmem.h>
#define DEFAULT_CMA_NAME "default_cma_region"
@@ -46,7 +47,9 @@ int __init dma_heap_cma_register_heap(struct cma *cma)
struct cma_heap {
struct dma_heap *heap;
struct cma *cma;
+ struct dmem_cgroup_region *cg;
};
+static struct dmem_cgroup_region *default_cma_cg;
struct cma_heap_buffer {
struct cma_heap *heap;
@@ -58,6 +61,7 @@ struct cma_heap_buffer {
pgoff_t pagecount;
int vmap_cnt;
void *vaddr;
+ struct dmem_cgroup_pool_state *pool;
};
struct dma_heap_attachment {
@@ -276,6 +280,7 @@ static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
kfree(buffer->pages);
/* release memory */
cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
+ dmem_cgroup_uncharge(buffer->pool, buffer->len);
kfree(buffer);
}
@@ -319,9 +324,16 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
if (align > CONFIG_CMA_ALIGNMENT)
align = CONFIG_CMA_ALIGNMENT;
+ if (mem_accounting) {
+ ret = dmem_cgroup_try_charge(cma_heap->cg, size,
+ &buffer->pool, NULL);
+ if (ret)
+ goto free_buffer;
+ }
+
cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
if (!cma_pages)
- goto free_buffer;
+ goto uncharge_cgroup;
/* Clear the cma pages */
if (PageHighMem(cma_pages)) {
@@ -376,6 +388,8 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
kfree(buffer->pages);
free_cma:
cma_release(cma_heap->cma, cma_pages, pagecount);
+uncharge_cgroup:
+ dmem_cgroup_uncharge(buffer->pool, size);
free_buffer:
kfree(buffer);
@@ -390,25 +404,52 @@ static int __init __add_cma_heap(struct cma *cma, const char *name)
{
struct dma_heap_export_info exp_info;
struct cma_heap *cma_heap;
+ struct dmem_cgroup_region *region;
+ int ret;
cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
if (!cma_heap)
return -ENOMEM;
cma_heap->cma = cma;
+ /*
+ * If two heaps are created for the default cma region, use the same
+ * dmem for them. They both use the same memory pool.
+ */
+ if (dev_get_cma_area(NULL) == cma && default_cma_cg)
+ region = default_cma_cg;
+ else {
+ region = dmem_cgroup_register_region(cma_get_size(cma), "cma/%s", name);
+ if (IS_ERR(region)) {
+ ret = PTR_ERR(region);
+ goto free_cma_heap;
+ }
+ }
+ cma_heap->cg = region;
+
exp_info.name = name;
exp_info.ops = &cma_heap_ops;
exp_info.priv = cma_heap;
cma_heap->heap = dma_heap_add(&exp_info);
if (IS_ERR(cma_heap->heap)) {
- int ret = PTR_ERR(cma_heap->heap);
-
- kfree(cma_heap);
- return ret;
+ ret = PTR_ERR(cma_heap->heap);
+ goto cg_unregister;
}
+ if (dev_get_cma_area(NULL) == cma && !default_cma_cg)
+ default_cma_cg = region;
+
return 0;
+
+cg_unregister:
+ /* default_cma_cg == cma_heap->cg only for the duplicate heap. */
+ if (default_cma_cg != cma_heap->cg)
+ dmem_cgroup_unregister_region(cma_heap->cg);
+free_cma_heap:
+ kfree(cma_heap);
+
+ return ret;
}
static int __init add_cma_heaps(void)
---
base-commit: 3d65e4c276b32c03450261d114e495fda03c8e97
change-id: 20260128-dmabuf-heap-cma-dmem-f4120a2df4a8
Best regards,
--
Eric Chanudet <echanude@...hat.com>
Powered by blists - more mailing lists