From d1ddc6e2196758923c71d649d52b9a14d678419b Mon Sep 17 00:00:00 2001 From: Jiang Liu Date: Sun, 2 Dec 2012 21:00:52 +0800 Subject: [PATCH 3/3] CMA: use new memblock interfaces to simplify implementation This patch simplifies dma-contiguous.c by using new memblock interfaces. Signed-off-by: Jiang Liu --- drivers/base/Kconfig | 1 + drivers/base/dma-contiguous.c | 36 +++++++++++++----------------------- include/linux/memblock.h | 1 + 3 files changed, 15 insertions(+), 23 deletions(-) diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index b34b5cd..b0ac008 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -197,6 +197,7 @@ config CMA depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL select MIGRATION select MEMORY_ISOLATION + select HAVE_MEMBLOCK_TAG help This enables the Contiguous Memory Allocator which allows drivers to allocate big physically-contiguous blocks of memory for use with diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c index 612afcc..c092b76 100644 --- a/drivers/base/dma-contiguous.c +++ b/drivers/base/dma-contiguous.c @@ -190,27 +190,24 @@ no_mem: return ERR_PTR(ret); } -static struct cma_reserved { - phys_addr_t start; - unsigned long size; - struct device *dev; -} cma_reserved[MAX_CMA_AREAS] __initdata; static unsigned cma_reserved_count __initdata; static int __init cma_init_reserved_areas(void) { - struct cma_reserved *r = cma_reserved; - unsigned i = cma_reserved_count; + struct memblock_region *reg; + struct cma *cma; pr_debug("%s()\n", __func__); - for (; i; --i, ++r) { - struct cma *cma; - cma = cma_create_area(PFN_DOWN(r->start), - r->size >> PAGE_SHIFT); - if (!IS_ERR(cma)) - dev_set_cma_area(r->dev, cma); - } + for_each_memblock(memory, reg) + if (reg->tag == MEMBLOCK_TAG_CMA) { + cma = cma_create_area(PFN_DOWN(reg->base), + reg->size >> PAGE_SHIFT); + if (!IS_ERR(cma)) + dev_set_cma_area(reg->data, cma); + } + memblock_free_all_with_tag(MEMBLOCK_TAG_CMA); + return 0; } 
core_initcall(cma_init_reserved_areas); @@ -230,7 +227,6 @@ core_initcall(cma_init_reserved_areas); int __init dma_declare_contiguous(struct device *dev, unsigned long size, phys_addr_t base, phys_addr_t limit) { - struct cma_reserved *r = &cma_reserved[cma_reserved_count]; unsigned long alignment; pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__, @@ -238,7 +234,7 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size, (unsigned long)limit); /* Sanity checks */ - if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) { + if (cma_reserved_count == MAX_CMA_AREAS) { pr_err("Not enough slots for CMA reserved regions!\n"); return -ENOSPC; } @@ -277,13 +273,7 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size, } } - /* - * Each reserved area must be initialised later, when more kernel - * subsystems (like slab allocator) are available. - */ - r->start = base; - r->size = size; - r->dev = dev; + BUG_ON(memblock_mark_tag(base, size, MEMBLOCK_TAG_CMA, dev)); cma_reserved_count++; pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M, (unsigned long)base); diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 5420ed9..a662c07 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -125,6 +125,7 @@ void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start, #ifdef CONFIG_HAVE_MEMBLOCK_TAG #define MEMBLOCK_TAG_DEFAULT 0x0 /* default tag for bootmem allocatror */ #define MEMBLOCK_TAG_HOTPLUG 0x1 /* reserved for memory hotplug */ +#define MEMBLOCK_TAG_CMA 0x2 /* reserved for CMA */ int memblock_mark_tag(phys_addr_t base, phys_addr_t size, int tag, void *data); void memblock_free_all_with_tag(int tag); -- 1.7.9.5