Message-ID: <b93ec55c-f6f0-274a-e7d6-edb419b4be8a@huawei.com>
Date: Sun, 29 Jan 2023 10:44:31 +0800
From: Liu Shixin <liushixin2@...wei.com>
To: Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Uladzislau Rezki <urezki@...il.com>,
Christoph Hellwig <hch@...radead.org>
CC: <linux-arm-kernel@...ts.infradead.org>,
<linux-kernel@...r.kernel.org>, <linux-mm@...ck.org>
Subject: Re: [PATCH RFC] arm64/vmalloc: use module region only for
module_alloc() if CONFIG_RANDOMIZE_BASE is set
Hi,
This patch seems to have been overlooked. I recently hit the problem again
on v6.1, so I would like to propose the patch again.
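
To make the failure mode concrete, here is a minimal userspace sketch of
how bottom-up vmalloc allocations can swallow the module window. The
addresses and sizes below are made-up stand-ins (the real
module_alloc_base comes from KASLR at boot), not the actual kernel
values:

#include <stdio.h>

#define SZ_128M		(128UL << 20)
#define VMALLOC_START	0xffff800008000000UL	/* made-up example value */
#define MODULES_VSIZE	SZ_128M			/* assumed non-KASAN size */

int main(void)
{
	/* Assume KASLR placed the module region at the bottom of vmalloc. */
	unsigned long module_alloc_base = VMALLOC_START;
	unsigned long module_alloc_end = module_alloc_base + MODULES_VSIZE;

	/* Generic vmalloc allocations are handed out bottom-up... */
	unsigned long cursor = VMALLOC_START;

	cursor += 10UL << 30;	/* e.g. mapping a 10GB pmem device */

	/* ...so one large mapping can consume the whole module window. */
	if (cursor >= module_alloc_end)
		printf("module region exhausted: module_alloc() will fail\n");
	return 0;
}
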
Thanks,
On 2022/12/27 17:26, Liu Shixin wrote:
> After adding a 10GB pmem device, I got the following error message
> when inserting a module:
>
> insmod: vmalloc error: size 16384, vm_struct allocation failed,
> mode:0xcc0(GFP_KERNEL), nodemask=(null),cpuset=/,mems_allowed=0
>
> If CONFIG_RANDOMIZE_BASE is set, the module region can be located
> entirely within the vmalloc region. Although module_alloc() can fall
> back to a 2GB window if ARM64_MODULE_PLTS is set, the module region
> is still easily exhausted, because it sits at the bottom of the
> vmalloc region and vmalloc allocations are made bottom-up.
>
> Skip the module region unless the allocation comes from module_alloc().
>
> Signed-off-by: Liu Shixin <liushixin2@...wei.com>
> ---
> arch/arm64/include/asm/vmalloc.h | 26 ++++++++++++++++++++++++++
> include/linux/vmalloc.h | 9 +++++++++
> mm/vmalloc.c | 4 ++++
> 3 files changed, 39 insertions(+)
>
> diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h
> index 38fafffe699f..4feff546b11b 100644
> --- a/arch/arm64/include/asm/vmalloc.h
> +++ b/arch/arm64/include/asm/vmalloc.h
> @@ -31,4 +31,30 @@ static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
> return pgprot_tagged(prot);
> }
>
> +#ifdef CONFIG_RANDOMIZE_BASE
> +extern u64 module_alloc_base;
> +#define arch_vmap_skip_module_region arch_vmap_skip_module_region
> +static inline void arch_vmap_skip_module_region(unsigned long *addr,
> + unsigned long vstart,
> + unsigned long size,
> + unsigned long align)
> +{
> + u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
> +
> + if (vstart == module_alloc_base)
> + return;
> +
> + if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
> + IS_ENABLED(CONFIG_KASAN_SW_TAGS))
> + /* don't exceed the static module region - see module_alloc() */
> + module_alloc_end = MODULES_END;
> +
> + if ((module_alloc_base >= *addr + size) ||
> + (module_alloc_end <= *addr))
> + return;
> +
> + *addr = ALIGN(module_alloc_end, align);
> +}
> +#endif
> +
> #endif /* _ASM_ARM64_VMALLOC_H */
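
As an aside, the helper's behaviour can be checked with a standalone
userspace sketch. The constants below are made-up stand-ins for the
non-KASAN case, not the real kernel values; the logic mirrors the hunk
above. A candidate address inside the module window gets bumped just
past it:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))
#define MODULES_VSIZE	(128UL << 20)		/* assumed non-KASAN size */

static unsigned long module_alloc_base = 0xffff800008000000UL; /* example */

static void skip_module_region(unsigned long *addr, unsigned long vstart,
			       unsigned long size, unsigned long align)
{
	unsigned long module_alloc_end = module_alloc_base + MODULES_VSIZE;

	if (vstart == module_alloc_base)	/* caller is module_alloc() */
		return;
	if (module_alloc_base >= *addr + size ||
	    module_alloc_end <= *addr)		/* no overlap, nothing to do */
		return;
	*addr = ALIGN(module_alloc_end, align);	/* bump past the region */
}

int main(void)
{
	unsigned long addr = module_alloc_base + (1UL << 20); /* in-window */

	skip_module_region(&addr, /* vstart */ 0xffff800000000000UL,
			   /* size */ 16384, /* align */ 1UL << 12);
	printf("bumped to %#lx\n", addr);	/* == module_alloc_end */
	return 0;
}
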
> diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
> index 096d48aa3437..55ef97325b84 100644
> --- a/include/linux/vmalloc.h
> +++ b/include/linux/vmalloc.h
> @@ -122,6 +122,15 @@ static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
> }
> #endif
>
> +#ifndef arch_vmap_skip_module_region
> +static inline void arch_vmap_skip_module_region(unsigned long *addr,
> + unsigned long vstart,
> + unsigned long size,
> + unsigned long align)
> +{
> +}
> +#endif
> +
> /*
> * Highlevel APIs for driver use
> */
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index ca71de7c9d77..c840d673052e 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -1236,6 +1236,8 @@ is_within_this_va(struct vmap_area *va, unsigned long size,
> else
> nva_start_addr = ALIGN(vstart, align);
>
> + arch_vmap_skip_module_region(&nva_start_addr, vstart, size, align);
> +
> /* Can be overflowed due to big size or alignment. */
> if (nva_start_addr + size < nva_start_addr ||
> nva_start_addr < vstart)
> @@ -1523,6 +1525,8 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
> else
> nva_start_addr = ALIGN(vstart, align);
>
> + arch_vmap_skip_module_region(&nva_start_addr, vstart, size, align);
> +
> /* Check the "vend" restriction. */
> if (nva_start_addr + size > vend)
> return vend;
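
These two call sites are the only integration points: the bump happens
after the candidate address is computed and before the existing
overflow/vstart/vend checks, so a bumped range that no longer fits is
rejected naturally and the search moves on. A hedged model of that
composition, continuing the sketch above (fits() is a made-up name that
mirrors is_within_this_va() from the patch):

/* Continues the sketch above: reuses ALIGN() and skip_module_region(). */
static int fits(unsigned long va_start, unsigned long va_end,
		unsigned long size, unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va_start > vstart)
		nva_start_addr = ALIGN(va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	skip_module_region(&nva_start_addr, vstart, size, align);

	/* Same overflow/vstart checks as is_within_this_va(). */
	if (nva_start_addr + size < nva_start_addr ||
	    nva_start_addr < vstart)
		return 0;

	/* If the bump pushed the range past this area, just move on. */
	return nva_start_addr + size <= va_end;
}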