Message-ID: <20201026165543.GA42952@C02TD0UTHF1T.local>
Date: Mon, 26 Oct 2020 16:55:55 +0000
From: Mark Rutland <mark.rutland@....com>
To: Arnd Bergmann <arnd@...nel.org>
Cc: Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>, Arnd Bergmann <arnd@...db.de>,
Vincenzo Frascino <vincenzo.frascino@....com>,
Ard Biesheuvel <ardb@...nel.org>,
Andrei Vagin <avagin@...il.com>,
Michel Lespinasse <walken@...gle.com>,
Mark Brown <broonie@...nel.org>,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2/4] arm64: hide more compat_vdso code
On Mon, Oct 26, 2020 at 05:03:29PM +0100, Arnd Bergmann wrote:
> From: Arnd Bergmann <arnd@...db.de>
>
> When CONFIG_COMPAT_VDSO is disabled, we get a warning
> about a potential out-of-bounds access:
>
> arch/arm64/kernel/vdso.c: In function 'aarch32_vdso_mremap':
> arch/arm64/kernel/vdso.c:86:37: warning: array subscript 1 is above array bounds of 'struct vdso_abi_info[1]' [-Warray-bounds]
> 86 | unsigned long vdso_size = vdso_info[abi].vdso_code_end -
> | ~~~~~~~~~^~~~~
>
> However, this is all dead code that the compiler is unable to
> eliminate by itself.
>
> Change the array to individual static variables that can be dropped
> by dead code elimination, to let the compiler understand this
> better.
>
> Fixes: 0cbc2659123e ("arm64: vdso32: Remove a bunch of #ifdef CONFIG_COMPAT_VDSO guards")
> Signed-off-by: Arnd Bergmann <arnd@...db.de>
This looks like a nice cleanup to me! I agree we don't need the array
here.
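For the archives, here is a toy sketch of the failure mode (my own
minimal example, not the kernel sources; all names below are made up
for illustration). The warning comes from GCC constant-propagating an
enum index into an access of a config-sized array inside a function it
cannot prove is unreachable; once the array is gone there is no
out-of-range subscript left to warn about. Building something like
this with "gcc -O2 -Warray-bounds toy.c" should trigger the same class
of warning:

/* toy.c -- stand-in for building with CONFIG_COMPAT_VDSO=n */
#include <stdio.h>

#define COMPAT_ENABLED 0

enum abi { ABI_AA64, ABI_AA32 };

/* one element when compat is off, mirroring 'struct vdso_abi_info[1]' */
static int info[1 + COMPAT_ENABLED] = { 64 };

static int lookup(enum abi abi)
{
	return info[abi];
}

/*
 * Only ever called when COMPAT_ENABLED is set, but GCC cannot prove
 * that from this translation unit, so after inlining lookup() it sees
 * a constant info[1] access and warns along the lines of:
 *   "array subscript 1 is above array bounds of 'int[1]'"
 */
int compat_lookup(void)
{
	return lookup(ABI_AA32);
}

/*
 * The shape of the fix: a separate object per ABI instead of an array
 * indexed by 'abi', so no out-of-range subscript exists for GCC to
 * analyse.
 */
static int info_aa64 = 64;

int native_lookup(void)
{
	return info_aa64;
}

int main(void)
{
	if (COMPAT_ENABLED)
		printf("%d\n", compat_lookup());
	printf("%d\n", native_lookup());
	return 0;
}

With COMPAT_ENABLED set to 1 the array grows to two elements and the
warning goes away, which corresponds to the CONFIG_COMPAT_VDSO=y case.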
Reviewed-by: Mark Rutland <mark.rutland@....com>
Thanks,
Mark.
> ---
> arch/arm64/kernel/vdso.c | 56 ++++++++++++++++++----------------------
> 1 file changed, 25 insertions(+), 31 deletions(-)
>
> diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
> index debb8995d57f..0b69d2894742 100644
> --- a/arch/arm64/kernel/vdso.c
> +++ b/arch/arm64/kernel/vdso.c
> @@ -286,36 +286,9 @@ static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
>  	return __vdso_remap(VDSO_ABI_AA32, sm, new_vma);
>  }
>  
> -enum aarch32_map {
> -	AA32_MAP_VECTORS, /* kuser helpers */
> -	AA32_MAP_SIGPAGE,
> -	AA32_MAP_VVAR,
> -	AA32_MAP_VDSO,
> -};
> -
>  static struct page *aarch32_vectors_page __ro_after_init;
>  static struct page *aarch32_sig_page __ro_after_init;
>  
> -static struct vm_special_mapping aarch32_vdso_maps[] = {
> -	[AA32_MAP_VECTORS] = {
> -		.name = "[vectors]", /* ABI */
> -		.pages = &aarch32_vectors_page,
> -	},
> -	[AA32_MAP_SIGPAGE] = {
> -		.name = "[sigpage]", /* ABI */
> -		.pages = &aarch32_sig_page,
> -	},
> -	[AA32_MAP_VVAR] = {
> -		.name = "[vvar]",
> -		.fault = vvar_fault,
> -		.mremap = vvar_mremap,
> -	},
> -	[AA32_MAP_VDSO] = {
> -		.name = "[vdso]",
> -		.mremap = aarch32_vdso_mremap,
> -	},
> -};
> -
>  static int aarch32_alloc_kuser_vdso_page(void)
>  {
>  	extern char __kuser_helper_start[], __kuser_helper_end[];
> @@ -352,14 +325,25 @@ static int aarch32_alloc_sigpage(void)
>  	return 0;
>  }
>  
> +static struct vm_special_mapping aarch32_vdso_map_vvar = {
> +	.name = "[vvar]",
> +	.fault = vvar_fault,
> +	.mremap = vvar_mremap,
> +};
> +
> +static struct vm_special_mapping aarch32_vdso_map_vdso = {
> +	.name = "[vdso]",
> +	.mremap = aarch32_vdso_mremap,
> +};
> +
>  static int __aarch32_alloc_vdso_pages(void)
>  {
>  
>  	if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
>  		return 0;
>  
> -	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
> -	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
> +	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_map_vvar;
> +	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_map_vdso;
>  
>  	return __vdso_init(VDSO_ABI_AA32);
>  }
> @@ -380,6 +364,11 @@ static int __init aarch32_alloc_vdso_pages(void)
>  }
>  arch_initcall(aarch32_alloc_vdso_pages);
>  
> +static struct vm_special_mapping aarch32_vdso_map_vectors = {
> +	.name = "[vectors]", /* ABI */
> +	.pages = &aarch32_vectors_page,
> +};
> +
>  static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
>  {
>  	void *ret;
> @@ -394,11 +383,16 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
>  	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
>  				       VM_READ | VM_EXEC |
>  				       VM_MAYREAD | VM_MAYEXEC,
> -				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);
> +				       &aarch32_vdso_map_vectors);
>  
>  	return PTR_ERR_OR_ZERO(ret);
>  }
>  
> +static struct vm_special_mapping aarch32_vdso_map_sigpage = {
> +	.name = "[sigpage]", /* ABI */
> +	.pages = &aarch32_sig_page,
> +};
> +
>  static int aarch32_sigreturn_setup(struct mm_struct *mm)
>  {
>  	unsigned long addr;
> @@ -417,7 +411,7 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
>  	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
>  				       VM_READ | VM_EXEC | VM_MAYREAD |
>  				       VM_MAYWRITE | VM_MAYEXEC,
> -				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
> +				       &aarch32_vdso_map_sigpage);
>  	if (IS_ERR(ret))
>  		goto out;
>  
> --
> 2.27.0
>