Message-Id: <719e0170-7645-4787-8c3a-676f34068c27@www.fastmail.com>
Date: Sat, 13 Nov 2021 17:18:11 -0800
From: "Andy Lutomirski" <luto@...nel.org>
To: "Brian Gerst" <brgerst@...il.com>,
"Linux Kernel Mailing List" <linux-kernel@...r.kernel.org>,
"the arch/x86 maintainers" <x86@...nel.org>
Cc: "Borislav Petkov" <bp@...en8.de>,
"Thomas Gleixner" <tglx@...utronix.de>,
"H. Peter Anvin" <hpa@...or.com>, "Ingo Molnar" <mingo@...nel.org>
Subject: Re: [PATCH 3/3] x86_64: Use relative per-cpu offsets
On Sat, Nov 13, 2021, at 4:40 AM, Brian Gerst wrote:
> The per-cpu section is currently linked at virtual address 0, because
> older compilers hardcoded the stack protector canary value at a fixed
> offset from the start of the GS segment. Use a standard relative offset
> as the GS base when the stack protector is disabled, or a newer compiler
> is used that supports a configurable location for the stack canary.
Can you explain the benefit? Also, I think we should consider dropping support for the fixed model like we did on x86_32.
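For anyone who has not stared at this recently, here is a rough sketch of the two models the commit message contrasts. The struct mirrors fixed_percpu_data from arch/x86/include/asm/processor.h; the comments about code generation are illustrative, not the exact instructions the compiler emits.

	/*
	 * Fixed model: older GCC hard-codes the canary load as %gs:40, so
	 * the kernel must provide a matching slot 40 bytes from GSBASE.
	 */
	struct fixed_percpu_data {
		char		gs_base[40];	/* pad so the canary lands at offset 40 */
		unsigned long	stack_canary;	/* what the compiler reads via %gs:40 */
	};

	/*
	 * That only works if GSBASE points at the very start of the CPU's
	 * per-cpu area, which is why the per-cpu section is currently
	 * linked at virtual address 0.  A compiler that supports the
	 * -mstack-protector-guard-reg=gs family of options (the cc-option
	 * probed in the Kconfig hunk below) can instead be told where the
	 * canary lives, so the zero-based link address is no longer
	 * required and GSBASE can hold an ordinary relative per-cpu offset.
	 */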
>
> Signed-off-by: Brian Gerst <brgerst@...il.com>
> ---
> arch/x86/Kconfig | 2 +-
> arch/x86/include/asm/percpu.h | 4 ++--
> arch/x86/kernel/head_64.S | 4 ----
> arch/x86/kernel/setup_percpu.c | 2 +-
> arch/x86/kernel/vmlinux.lds.S | 14 ++++++--------
> 5 files changed, 10 insertions(+), 16 deletions(-)
>
> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
> index 832a6626df72..fae7724505bd 100644
> --- a/arch/x86/Kconfig
> +++ b/arch/x86/Kconfig
> @@ -403,7 +403,7 @@ config STACKPROTECTOR_FIXED
> default y if !$(cc-option,-mstack-protector-guard-reg=gs)
>
> config X86_ABSOLUTE_PERCPU
> - def_bool X86_64 && SMP
> + def_bool STACKPROTECTOR_FIXED && SMP
> select KALLSYMS_ABSOLUTE_PERCPU
>
> menu "Processor type and features"
> diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
> index a3c33b79fb86..8294781bb483 100644
> --- a/arch/x86/include/asm/percpu.h
> +++ b/arch/x86/include/asm/percpu.h
> @@ -16,7 +16,7 @@
> #define PER_CPU_VAR(var) var
> #endif /* SMP */
>
> -#ifdef CONFIG_X86_64_SMP
> +#ifdef CONFIG_X86_ABSOLUTE_PERCPU
> #define INIT_PER_CPU_VAR(var) init_per_cpu__##var
> #else
> #define INIT_PER_CPU_VAR(var) var
> @@ -59,7 +59,7 @@
> #define DECLARE_INIT_PER_CPU(var) \
> extern typeof(var) init_per_cpu_var(var)
>
> -#ifdef CONFIG_X86_64_SMP
> +#ifdef CONFIG_X86_ABSOLUTE_PERCPU
> #define init_per_cpu_var(var) init_per_cpu__##var
> #else
> #define init_per_cpu_var(var) var
> diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
> index 6e396ffb1610..c1b6209a01ca 100644
> --- a/arch/x86/kernel/head_64.S
> +++ b/arch/x86/kernel/head_64.S
> @@ -339,12 +339,8 @@ SYM_DATA(initial_code, .quad x86_64_start_kernel)
> #ifdef CONFIG_STACKPROTECTOR_FIXED
> SYM_DATA(initial_gs, .quad INIT_PER_CPU_VAR(fixed_percpu_data))
> #else
> -#ifdef CONFIG_SMP
> -SYM_DATA(initial_gs, .quad __per_cpu_load)
> -#else
> SYM_DATA(initial_gs, .quad 0)
> #endif
> -#endif
>
> #ifdef CONFIG_AMD_MEM_ENCRYPT
> SYM_DATA(initial_vc_handler, .quad handle_vc_boot_ghcb)
> diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
> index 5afd98559193..4c0020a6ced9 100644
> --- a/arch/x86/kernel/setup_percpu.c
> +++ b/arch/x86/kernel/setup_percpu.c
> @@ -26,7 +26,7 @@
> DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
> EXPORT_PER_CPU_SYMBOL(cpu_number);
>
> -#ifdef CONFIG_X86_64
> +#ifdef CONFIG_X86_ABSOLUTE_PERCPU
> #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
> #else
> #define BOOT_PERCPU_OFFSET 0
> diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
> index c475d21d2126..18e1deb9fa52 100644
> --- a/arch/x86/kernel/vmlinux.lds.S
> +++ b/arch/x86/kernel/vmlinux.lds.S
> @@ -102,10 +102,10 @@ jiffies = jiffies_64;
> PHDRS {
> text PT_LOAD FLAGS(5); /* R_E */
> data PT_LOAD FLAGS(6); /* RW_ */
> -#ifdef CONFIG_X86_64
> -#ifdef CONFIG_SMP
> +#ifdef CONFIG_X86_ABSOLUTE_PERCPU
> percpu PT_LOAD FLAGS(6); /* RW_ */
> #endif
> +#ifdef CONFIG_X86_64
> init PT_LOAD FLAGS(7); /* RWE */
> #endif
> note PT_NOTE FLAGS(0); /* ___ */
> @@ -215,7 +215,7 @@ SECTIONS
> __init_begin = .; /* paired with __init_end */
> }
>
> -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
> +#ifdef CONFIG_X86_ABSOLUTE_PERCPU
> /*
> * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
> * output PHDR, so the next output section - .init.text - should
> @@ -339,7 +339,7 @@ SECTIONS
> EXIT_DATA
> }
>
> -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
> +#ifndef CONFIG_X86_ABSOLUTE_PERCPU
> PERCPU_SECTION(INTERNODE_CACHE_BYTES)
> #endif
>
> @@ -474,7 +474,7 @@ SECTIONS
> . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
> "kernel image bigger than KERNEL_IMAGE_SIZE");
>
> -#ifdef CONFIG_X86_64
> +#ifdef CONFIG_X86_ABSOLUTE_PERCPU
> /*
> * Per-cpu symbols which need to be offset from __per_cpu_load
> * for the boot processor.
> @@ -483,13 +483,11 @@ SECTIONS
> INIT_PER_CPU(gdt_page);
> INIT_PER_CPU(irq_stack_backing_store);
>
> -#ifdef CONFIG_STACKPROTECTOR_FIXED
> INIT_PER_CPU(fixed_percpu_data);
> . = ASSERT((fixed_percpu_data == 0),
> "fixed_percpu_data is not at start of per-cpu area");
> -#endif
>
> -#endif /* CONFIG_X86_64 */
> +#endif /* CONFIG_X86_ABSOLUTE_PERCPU */
>
> #ifdef CONFIG_KEXEC_CORE
> #include <asm/kexec.h>
> --
> 2.31.1
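To make the vmlinux.lds.S side of this concrete (my reading of the hunk above, so treat the exact invocation as a sketch rather than a quote of the file): the X86_ABSOLUTE_PERCPU path keeps the zero-based layout via something like

	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)

i.e. the section is loaded at __per_cpu_load but linked at virtual address 0 in its own percpu PT_LOAD segment, while the !X86_ABSOLUTE_PERCPU path falls back to the generic

	PERCPU_SECTION(INTERNODE_CACHE_BYTES)

which links the per-cpu section at its natural address like any other data, matching the initial_gs = 0 change in head_64.S.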