Message-ID: <CAFULd4Z3o18zRebbqTgNH9QrPkoSLtbS=_mZGfheJYoeMawUaw@mail.gmail.com>
Date: Wed, 26 Feb 2025 21:08:02 +0100
From: Uros Bizjak <ubizjak@...il.com>
To: Brian Gerst <brgerst@...il.com>
Cc: linux-kernel@...r.kernel.org, x86@...nel.org,
Ingo Molnar <mingo@...nel.org>, "H . Peter Anvin" <hpa@...or.com>, Thomas Gleixner <tglx@...utronix.de>,
Borislav Petkov <bp@...en8.de>, Ard Biesheuvel <ardb@...nel.org>,
Linus Torvalds <torvalds@...uxfoundation.org>, Andy Lutomirski <luto@...nel.org>,
Peter Zijlstra <peterz@...radead.org>, Andrew Morton <akpm@...ux-foundation.org>
Subject: Re: [PATCH v2 08/11] x86/percpu: Move top_of_stack to percpu hot section
On Wed, Feb 26, 2025 at 7:06 PM Brian Gerst <brgerst@...il.com> wrote:
>
> No functional change.
>
> Signed-off-by: Brian Gerst <brgerst@...il.com>
> ---
>  arch/x86/entry/entry_32.S        |  4 ++--
>  arch/x86/entry/entry_64.S        |  6 +++---
>  arch/x86/entry/entry_64_compat.S |  4 ++--
>  arch/x86/include/asm/current.h   |  1 -
>  arch/x86/include/asm/percpu.h    |  2 +-
>  arch/x86/include/asm/processor.h |  8 ++++++--
>  arch/x86/kernel/asm-offsets.c    |  1 -
>  arch/x86/kernel/cpu/common.c     |  3 ++-
>  arch/x86/kernel/process_32.c     |  4 ++--
>  arch/x86/kernel/process_64.c     |  2 +-
>  arch/x86/kernel/smpboot.c        |  2 +-
>  arch/x86/kernel/vmlinux.lds.S    |  1 +
>  12 files changed, 21 insertions(+), 17 deletions(-)
>
> diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
> index 20be5758c2d2..92c0b4a94e0a 100644
> --- a/arch/x86/entry/entry_32.S
> +++ b/arch/x86/entry/entry_32.S
> @@ -1153,7 +1153,7 @@ SYM_CODE_START(asm_exc_nmi)
> * is using the thread stack right now, so it's safe for us to use it.
> */
> movl %esp, %ebx
> - movl PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %esp
> + movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
> call exc_nmi
> movl %ebx, %esp
>
> @@ -1217,7 +1217,7 @@ SYM_CODE_START(rewind_stack_and_make_dead)
> /* Prevent any naive code from trying to unwind to our caller. */
> xorl %ebp, %ebp
>
> - movl PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %esi
> + movl PER_CPU_VAR(cpu_current_top_of_stack), %esi
> leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
>
> call make_task_dead
> diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
> index 33a955aa01d8..9baf32a7a118 100644
> --- a/arch/x86/entry/entry_64.S
> +++ b/arch/x86/entry/entry_64.S
> @@ -92,7 +92,7 @@ SYM_CODE_START(entry_SYSCALL_64)
> /* tss.sp2 is scratch space. */
> movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
> SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
> - movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
> + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
>
> SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL)
> ANNOTATE_NOENDBR
> @@ -1166,7 +1166,7 @@ SYM_CODE_START(asm_exc_nmi)
> FENCE_SWAPGS_USER_ENTRY
> SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
> movq %rsp, %rdx
> - movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
> + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
> UNWIND_HINT_IRET_REGS base=%rdx offset=8
> pushq 5*8(%rdx) /* pt_regs->ss */
> pushq 4*8(%rdx) /* pt_regs->rsp */
> @@ -1484,7 +1484,7 @@ SYM_CODE_START_NOALIGN(rewind_stack_and_make_dead)
> /* Prevent any naive code from trying to unwind to our caller. */
> xorl %ebp, %ebp
>
> - movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rax
> + movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
> leaq -PTREGS_SIZE(%rax), %rsp
> UNWIND_HINT_REGS
>
> diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
> index ed0a5f2dc129..a45e1125fc6c 100644
> --- a/arch/x86/entry/entry_64_compat.S
> +++ b/arch/x86/entry/entry_64_compat.S
> @@ -57,7 +57,7 @@ SYM_CODE_START(entry_SYSENTER_compat)
> SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
> popq %rax
>
> - movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
> + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
>
> /* Construct struct pt_regs on stack */
> pushq $__USER_DS /* pt_regs->ss */
> @@ -193,7 +193,7 @@ SYM_CODE_START(entry_SYSCALL_compat)
> SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
>
> /* Switch to the kernel stack */
> - movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
> + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
>
> SYM_INNER_LABEL(entry_SYSCALL_compat_safe_stack, SYM_L_GLOBAL)
> ANNOTATE_NOENDBR
> diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
> index 6fad5a4c21d7..3d1b123c2ee3 100644
> --- a/arch/x86/include/asm/current.h
> +++ b/arch/x86/include/asm/current.h
> @@ -14,7 +14,6 @@ struct task_struct;
>
> struct pcpu_hot {
> struct task_struct *current_task;
> - unsigned long top_of_stack;
> };
>
> DECLARE_PER_CPU_CACHE_HOT(struct pcpu_hot, pcpu_hot);
> diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
> index 7cb4f64b2e60..044410462d36 100644
> --- a/arch/x86/include/asm/percpu.h
> +++ b/arch/x86/include/asm/percpu.h
> @@ -554,7 +554,7 @@ do { \
> * it is accessed while this_cpu_read_stable() allows the value to be cached.
> * this_cpu_read_stable() is more efficient and can be used if its value
> * is guaranteed to be valid across CPUs. The current users include
> - * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are
> + * pcpu_hot.current_task and cpu_current_top_of_stack, both of which are
> * actually per-thread variables implemented as per-CPU variables and
> * thus stable for the duration of the respective task.
> */
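
[ Aside for readers following along: a minimal sketch of the distinction
  this comment describes, assuming the post-patch variable name.
  this_cpu_read() is a volatile access and is re-read at every use,
  while this_cpu_read_stable() lets the compiler cache and reuse the
  load, which is safe here because the value is per-thread and only
  changes at context switch, never underneath the running task:

	unsigned long fresh  = this_cpu_read(cpu_current_top_of_stack);
	unsigned long cached = this_cpu_read_stable(cpu_current_top_of_stack);
]
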
> diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
> index 54fce8d7504d..b4d51de071f2 100644
> --- a/arch/x86/include/asm/processor.h
> +++ b/arch/x86/include/asm/processor.h
> @@ -427,6 +427,10 @@ DECLARE_PER_CPU_CACHE_HOT(bool, hardirq_stack_inuse);
> DECLARE_PER_CPU_CACHE_HOT(struct irq_stack *, softirq_stack_ptr);
> #endif
>
> +DECLARE_PER_CPU_CACHE_HOT(unsigned long, cpu_current_top_of_stack);
> +/* const-qualified alias provided by the linker. */
> +DECLARE_PER_CPU_CACHE_HOT(const unsigned long __percpu_seg_override, const_cpu_current_top_of_stack);
Please split the above line, like you did with the const_current_task
declaration.
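
I.e. something like this (a sketch; the same declaration, just wrapped
the way the const_current_task one is):

	/* const-qualified alias provided by the linker. */
	DECLARE_PER_CPU_CACHE_HOT(const unsigned long __percpu_seg_override,
				  const_cpu_current_top_of_stack);
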
Uros.