Message-ID: <aWYbcnglF5P/WhL4@MiWiFi-R3L-srv>
Date: Tue, 13 Jan 2026 18:16:18 +0800
From: Baoquan He <bhe@...hat.com>
To: Robin Murphy <robin.murphy@....com>
Cc: m.szyprowski@...sung.com, akpm@...ux-foundation.org, vbabka@...e.cz,
david@...nel.org, iommu@...ts.linux-foundation.org,
linux-mm@...ck.org, vladimir.kondratiev@...ileye.com,
s-adivi@...com, linux-kernel@...r.kernel.org,
lorenzo.stoakes@...cle.com, Liam.Howlett@...cle.com,
rppt@...nel.org, surenb@...gle.com, mhocko@...e.com,
jackmanb@...gle.com, hannes@...xchg.org, ziy@...dia.com
Subject: Re: [PATCH 3/3] dma/pool: Avoid allocating redundant pools

On 01/12/26 at 03:46pm, Robin Murphy wrote:
> On smaller systems, e.g. embedded arm64, it is common for all memory
> to end up in ZONE_DMA32 or even ZONE_DMA. In such cases it is redundant

This is true, and the whole series looks great to me. Do we need to
adjust warn_alloc() to handle an empty DMA32 zone too, like the
existing empty DMA zone case?
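Something like the below untested sketch is what I have in mind. The
has_managed_dma32 macro just mirrors the one this patch adds to
kernel/dma/pool.c, and warn_alloc_skip_empty_zone() is a made-up name
for wherever the check would actually end up in mm/page_alloc.c:

#ifdef CONFIG_ZONE_DMA32
#define has_managed_dma32 has_managed_zone(ZONE_DMA32)
#else
#define has_managed_dma32 false
#endif

static bool warn_alloc_skip_empty_zone(gfp_t gfp_mask)
{
	/*
	 * Skip the allocation-failure warning when the request is
	 * constrained to a zone with no managed pages at all,
	 * covering DMA32 the same way as the existing DMA check.
	 */
	if ((gfp_mask & __GFP_DMA) && !has_managed_dma())
		return true;
	if ((gfp_mask & __GFP_DMA32) && !has_managed_dma32)
		return true;
	return false;
}
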
> to allocate a nominal pool for an empty higher zone that just ends up
> coming from a lower zone that should already have its own pool anyway.
> We already have logic to skip allocating a ZONE_DMA pool when that is
> empty, so generalise that to save memory in the case of other zones too.
>
> Signed-off-by: Robin Murphy <robin.murphy@....com>
> ---
> kernel/dma/pool.c | 19 ++++++++++++++-----
> 1 file changed, 14 insertions(+), 5 deletions(-)
>
> diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
> index 2645cfb5718b..c5da29ad010c 100644
> --- a/kernel/dma/pool.c
> +++ b/kernel/dma/pool.c
> @@ -184,6 +184,12 @@ static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
>  	return pool;
>  }
>  
> +#ifdef CONFIG_ZONE_DMA32
> +#define has_managed_dma32 has_managed_zone(ZONE_DMA32)
> +#else
> +#define has_managed_dma32 false
> +#endif
> +
>  static int __init dma_atomic_pool_init(void)
>  {
>  	int ret = 0;
> @@ -199,17 +205,20 @@ static int __init dma_atomic_pool_init(void)
>  	}
>  	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);
>  
> -	atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
> +	/* All memory might be in the DMA zone(s) to begin with */
> +	if (has_managed_zone(ZONE_NORMAL)) {
> +		atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
>  						    GFP_KERNEL);
> -	if (!atomic_pool_kernel)
> -		ret = -ENOMEM;
> +		if (!atomic_pool_kernel)
> +			ret = -ENOMEM;
> +	}
>  	if (has_managed_dma()) {
>  		atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
>  						GFP_KERNEL | GFP_DMA);
>  		if (!atomic_pool_dma)
>  			ret = -ENOMEM;
>  	}
> -	if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
> +	if (has_managed_dma32) {
>  		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
>  						GFP_KERNEL | GFP_DMA32);
>  		if (!atomic_pool_dma32)
> @@ -228,7 +237,7 @@ static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
>  			return atomic_pool_dma ?: atomic_pool_dma32 ?: atomic_pool_kernel;
>  		if (gfp & GFP_DMA32)
>  			return atomic_pool_dma32 ?: atomic_pool_dma ?: atomic_pool_kernel;
> -		return atomic_pool_kernel;
> +		return atomic_pool_kernel ?: atomic_pool_dma32 ?: atomic_pool_dma;
>  	}
>  	if (prev == atomic_pool_kernel)
>  		return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
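
One note for other readers: the fallback chains here rely on the GCC
"a ?: b" extension, which evaluates to a when a is non-NULL and to b
otherwise, without evaluating a twice. So e.g.:

	return atomic_pool_kernel ?: atomic_pool_dma32 ?: atomic_pool_dma;

means a GFP_KERNEL request falls back to whichever lower pool was
actually allocated, now that atomic_pool_kernel can legitimately be
NULL when all memory sits in the DMA zone(s).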
> --
> 2.34.1
>
>