Message-ID: <alpine.DEB.2.21.2003011537000.213582@chino.kir.corp.google.com>
Date: Sun, 1 Mar 2020 16:05:13 -0800 (PST)
From: David Rientjes <rientjes@...gle.com>
To: Christoph Hellwig <hch@....de>,
Tom Lendacky <thomas.lendacky@....com>
cc: "Singh, Brijesh" <brijesh.singh@....com>,
"Grimm, Jon" <jon.grimm@....com>, Joerg Roedel <joro@...tes.org>,
baekhw@...gle.com,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"iommu@...ts.linux-foundation.org" <iommu@...ts.linux-foundation.org>
Subject: [rfc 2/6] dma-remap: add additional atomic pools to map to gfp mask

The single atomic pool is allocated from the lowest zone possible since
it is guaranteed to be applicable for any DMA allocation.

Devices may allocate through the DMA API but not have a strict reliance
on GFP_DMA memory. Since the atomic pool will be used for all
non-blocking allocations, returning all memory from ZONE_DMA may
unnecessarily deplete the zone.

Provide multiple atomic pools that map to the optimal gfp mask of the
device. These will be wired up in a subsequent patch.
Signed-off-by: David Rientjes <rientjes@...gle.com>
---
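
Not part of this patch, but as a rough illustration of the wiring
mentioned in the changelog: a later change could select a pool based on
the device's coherent DMA mask. The helper name and the 24-bit ZONE_DMA
cutoff below are assumptions for illustration only, not necessarily what
the follow-up patch does:

/*
 * Illustrative sketch only: map a device's coherent DMA mask to one of
 * the three pools declared by this patch.
 */
static struct gen_pool *dev_to_atomic_pool(struct device *dev)
{
	u64 mask = dev->coherent_dma_mask;

	if (IS_ENABLED(CONFIG_ZONE_DMA) && mask <= DMA_BIT_MASK(24))
		return atomic_pool;
	if (IS_ENABLED(CONFIG_ZONE_DMA32) && mask <= DMA_BIT_MASK(32))
		return atomic_pool_dma32;
	return atomic_pool_normal;
}
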
kernel/dma/remap.c | 75 +++++++++++++++++++++++++++-------------------
1 file changed, 45 insertions(+), 30 deletions(-)
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -100,6 +100,8 @@ void dma_common_free_remap(void *cpu_addr, size_t size)
#ifdef CONFIG_DMA_DIRECT_REMAP
static struct gen_pool *atomic_pool __ro_after_init;
+static struct gen_pool *atomic_pool_dma32 __ro_after_init;
+static struct gen_pool *atomic_pool_normal __ro_after_init;
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
@@ -111,66 +113,79 @@ static int __init early_coherent_pool(char *p)
}
early_param("coherent_pool", early_coherent_pool);
-static gfp_t dma_atomic_pool_gfp(void)
+static int __init __dma_atomic_pool_init(struct gen_pool **pool,
+ size_t pool_size, gfp_t gfp)
{
- if (IS_ENABLED(CONFIG_ZONE_DMA))
- return GFP_DMA;
- if (IS_ENABLED(CONFIG_ZONE_DMA32))
- return GFP_DMA32;
- return GFP_KERNEL;
-}
-
-static int __init dma_atomic_pool_init(void)
-{
- unsigned int pool_size_order = get_order(atomic_pool_size);
- unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
+ const unsigned int order = get_order(pool_size);
+ const unsigned long nr_pages = pool_size >> PAGE_SHIFT;
struct page *page;
void *addr;
int ret;
if (dev_get_cma_area(NULL))
- page = dma_alloc_from_contiguous(NULL, nr_pages,
- pool_size_order, false);
+ page = dma_alloc_from_contiguous(NULL, nr_pages, order, false);
else
- page = alloc_pages(dma_atomic_pool_gfp(), pool_size_order);
+ page = alloc_pages(gfp, order);
if (!page)
goto out;
- arch_dma_prep_coherent(page, atomic_pool_size);
+ arch_dma_prep_coherent(page, pool_size);
- atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
- if (!atomic_pool)
+ *pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (!*pool)
goto free_page;
- addr = dma_common_contiguous_remap(page, atomic_pool_size,
+ addr = dma_common_contiguous_remap(page, pool_size,
pgprot_dmacoherent(PAGE_KERNEL),
__builtin_return_address(0));
if (!addr)
goto destroy_genpool;
- ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
- page_to_phys(page), atomic_pool_size, -1);
+ ret = gen_pool_add_virt(*pool, (unsigned long)addr, page_to_phys(page),
+ pool_size, -1);
if (ret)
goto remove_mapping;
- gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);
+ gen_pool_set_algo(*pool, gen_pool_first_fit_order_align, NULL);
- pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
- atomic_pool_size / 1024);
+ pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
+ pool_size >> 10, &gfp);
return 0;
remove_mapping:
- dma_common_free_remap(addr, atomic_pool_size);
+ dma_common_free_remap(addr, pool_size);
destroy_genpool:
- gen_pool_destroy(atomic_pool);
- atomic_pool = NULL;
+ gen_pool_destroy(*pool);
+ *pool = NULL;
free_page:
if (!dma_release_from_contiguous(NULL, page, nr_pages))
- __free_pages(page, pool_size_order);
+ __free_pages(page, order);
out:
- pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
- atomic_pool_size / 1024);
+ pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
+	       pool_size >> 10, &gfp);
return -ENOMEM;
}
+
+static int __init dma_atomic_pool_init(void)
+{
+ int ret = 0;
+ int err;
+
+ ret = __dma_atomic_pool_init(&atomic_pool_normal, atomic_pool_size,
+ GFP_KERNEL);
+ if (IS_ENABLED(CONFIG_ZONE_DMA)) {
+ err = __dma_atomic_pool_init(&atomic_pool, atomic_pool_size,
+ GFP_DMA);
+ if (!ret && err)
+ ret = err;
+ }
+ if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
+ err = __dma_atomic_pool_init(&atomic_pool_dma32,
+ atomic_pool_size, GFP_DMA32);
+ if (!ret && err)
+ ret = err;
+ }
+ return ret;
+}
postcore_initcall(dma_atomic_pool_init);
static bool dma_in_atomic_pool(struct device *dev, void *start, size_t size)
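
Also not part of this patch: with three pools instead of one,
dma_in_atomic_pool() and dma_free_from_pool() presumably have to test
the pool the device actually maps to (or each pool in turn) rather than
the single atomic_pool. A minimal membership check, with an illustrative
helper name, could look like:

static bool pool_has_addr(struct gen_pool *pool, void *start, size_t size)
{
	if (unlikely(!pool))
		return false;
	return gen_pool_has_addr(pool, (unsigned long)start, size);
}

gen_pool_has_addr() is the existing genalloc helper for this kind of
membership check.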