Message-ID: <20250222190623.262689-4-brgerst@gmail.com>
Date: Sat, 22 Feb 2025 14:06:15 -0500
From: Brian Gerst <brgerst@...il.com>
To: linux-kernel@...r.kernel.org,
x86@...nel.org
Cc: Ingo Molnar <mingo@...nel.org>,
"H . Peter Anvin" <hpa@...or.com>,
Thomas Gleixner <tglx@...utronix.de>,
Borislav Petkov <bp@...en8.de>,
Ard Biesheuvel <ardb@...nel.org>,
Uros Bizjak <ubizjak@...il.com>,
Brian Gerst <brgerst@...il.com>
Subject: [RFC PATCH 03/11] x86/smp: Move cpu number to percpu hot section

Move cpu_number out of struct pcpu_hot and make it a standalone per-CPU
variable in the hot section. Switch its users (raw_smp_processor_id(),
__smp_processor_id(), the early per-CPU setup, and the BPF verifier's
inlined bpf_get_smp_processor_id()) over to the new variable.

No functional change.
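
For illustration, the resulting access pattern (a minimal sketch, not
part of the patch):

	/* Before: the CPU number is a member of struct pcpu_hot */
	cpu = this_cpu_read(pcpu_hot.cpu_number);

	/*
	 * After: cpu_number is its own per-CPU variable in the hot section,
	 * declared via DECLARE_PER_CPU_HOT(int, cpu_number) in <asm/smp.h>.
	 */
	cpu = this_cpu_read(cpu_number);
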
Signed-off-by: Brian Gerst <brgerst@...il.com>
---
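Note on the kernel/bpf/verifier.c hunk below: the verifier inlines
bpf_get_smp_processor_id() as a three-instruction sequence that only
needs the address of the per-CPU variable, which is why the cast changes
from &pcpu_hot.cpu_number to &cpu_number. Roughly, as a C sketch
(illustrative only; percpu_base is a made-up name for the per-CPU base
address that BPF_MOV64_PERCPU_REG adds at JIT time):

	u32 off = (u32)(unsigned long)&cpu_number;	/* BPF_MOV32_IMM           */
	void *addr = (void *)(percpu_base + off);	/* BPF_MOV64_PERCPU_REG    */
	cpu = *(int *)addr;				/* BPF_LDX_MEM(BPF_W, ...) */
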
 arch/x86/include/asm/current.h | 1 -
 arch/x86/include/asm/smp.h     | 7 ++++---
 arch/x86/kernel/setup_percpu.c | 5 ++++-
 kernel/bpf/verifier.c          | 4 ++--
 4 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index 9a2fe2fd7d74..119099431c76 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -16,7 +16,6 @@ struct pcpu_hot {
 	union {
 		struct {
 			struct task_struct	*current_task;
-			int			cpu_number;
 #ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
 			u64			call_depth;
 #endif
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index ca073f40698f..a249f9991708 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -5,9 +5,10 @@
 #include <linux/cpumask.h>
 
 #include <asm/cpumask.h>
-#include <asm/current.h>
 #include <asm/thread_info.h>
 
+DECLARE_PER_CPU_HOT(int, cpu_number);
+
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
@@ -133,8 +134,8 @@ __visible void smp_call_function_single_interrupt(struct pt_regs *r);
  * This function is needed by all SMP systems. It must _always_ be valid
  * from the initial startup.
  */
-#define raw_smp_processor_id()  this_cpu_read(pcpu_hot.cpu_number)
-#define __smp_processor_id() __this_cpu_read(pcpu_hot.cpu_number)
+#define raw_smp_processor_id()  this_cpu_read(cpu_number)
+#define __smp_processor_id() __this_cpu_read(cpu_number)
 
 #ifdef CONFIG_X86_32
 extern int safe_smp_processor_id(void);
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 1e7be9409aa2..0ea3443433c5 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -23,6 +23,9 @@
 #include <asm/cpumask.h>
 #include <asm/cpu.h>
 
+DEFINE_PER_CPU_HOT(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+
 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
 
@@ -161,7 +164,7 @@ void __init setup_per_cpu_areas(void)
 	for_each_possible_cpu(cpu) {
 		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
 		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
-		per_cpu(pcpu_hot.cpu_number, cpu) = cpu;
+		per_cpu(cpu_number, cpu) = cpu;
 		setup_percpu_segment(cpu);
 		/*
 		 * Copy data used in early init routines from the
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 9971c03adfd5..604134d33282 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -21687,12 +21687,12 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 		if (insn->imm == BPF_FUNC_get_smp_processor_id &&
 		    verifier_inlines_helper_call(env, insn->imm)) {
 			/* BPF_FUNC_get_smp_processor_id inlining is an
-			 * optimization, so if pcpu_hot.cpu_number is ever
+			 * optimization, so if cpu_number is ever
 			 * changed in some incompatible and hard to support
 			 * way, it's fine to back out this inlining logic
 			 */
 #ifdef CONFIG_SMP
-			insn_buf[0] = BPF_MOV32_IMM(BPF_REG_0, (u32)(unsigned long)&pcpu_hot.cpu_number);
+			insn_buf[0] = BPF_MOV32_IMM(BPF_REG_0, (u32)(unsigned long)&cpu_number);
 			insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
 			insn_buf[2] = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0);
 			cnt = 3;
--
2.48.1