Message-ID: <20250222190623.262689-3-brgerst@gmail.com>
Date: Sat, 22 Feb 2025 14:06:14 -0500
From: Brian Gerst <brgerst@...il.com>
To: linux-kernel@...r.kernel.org,
x86@...nel.org
Cc: Ingo Molnar <mingo@...nel.org>,
"H . Peter Anvin" <hpa@...or.com>,
Thomas Gleixner <tglx@...utronix.de>,
Borislav Petkov <bp@...en8.de>,
Ard Biesheuvel <ardb@...nel.org>,
Uros Bizjak <ubizjak@...il.com>,
Brian Gerst <brgerst@...il.com>
Subject: [RFC PATCH 02/11] x86/preempt: Move preempt count to percpu hot section
No functional change.
Signed-off-by: Brian Gerst <brgerst@...il.com>
---
arch/x86/include/asm/current.h | 1 -
arch/x86/include/asm/preempt.h | 25 +++++++++++++------------
arch/x86/kernel/cpu/common.c | 4 +++-
3 files changed, 16 insertions(+), 14 deletions(-)
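
Note for review: the DECLARE_PER_CPU_HOT()/DEFINE_PER_CPU_HOT() helpers used
below are the hot-section declare/define macros introduced earlier in this
series. For readers looking at this patch on its own, a rough, illustrative
sketch of the idea, built on the generic per-cpu section macros from
<linux/percpu-defs.h> -- the "..hot" section name here is an assumption, not
the actual definition from that patch:

  /* Illustrative only: hot-section variants of the generic per-cpu
   * macros; the real macros and section name come from the earlier
   * patch in this series. */
  #define DECLARE_PER_CPU_HOT(type, name)	DECLARE_PER_CPU_SECTION(type, name, "..hot")
  #define DEFINE_PER_CPU_HOT(type, name)	DEFINE_PER_CPU_SECTION(type, name, "..hot")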
diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index bf5953883ec3..9a2fe2fd7d74 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -16,7 +16,6 @@ struct pcpu_hot {
 	union {
 		struct {
 			struct task_struct	*current_task;
-			int			preempt_count;
 			int			cpu_number;
 #ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
 			u64			call_depth;
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 919909d8cb77..2f3a40cbdd76 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -4,10 +4,11 @@
#include <asm/rmwcc.h>
#include <asm/percpu.h>
-#include <asm/current.h>
#include <linux/static_call_types.h>
+DECLARE_PER_CPU_HOT(int, __preempt_count);
+
/* We use the MSB mostly because its available */
#define PREEMPT_NEED_RESCHED 0x80000000
@@ -23,18 +24,18 @@
*/
static __always_inline int preempt_count(void)
{
- return raw_cpu_read_4(pcpu_hot.preempt_count) & ~PREEMPT_NEED_RESCHED;
+ return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
}
static __always_inline void preempt_count_set(int pc)
{
int old, new;
- old = raw_cpu_read_4(pcpu_hot.preempt_count);
+ old = raw_cpu_read_4(__preempt_count);
do {
new = (old & PREEMPT_NEED_RESCHED) |
(pc & ~PREEMPT_NEED_RESCHED);
- } while (!raw_cpu_try_cmpxchg_4(pcpu_hot.preempt_count, &old, new));
+ } while (!raw_cpu_try_cmpxchg_4(__preempt_count, &old, new));
}
/*
@@ -43,7 +44,7 @@ static __always_inline void preempt_count_set(int pc)
 #define init_task_preempt_count(p) do { } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-	per_cpu(pcpu_hot.preempt_count, (cpu)) = PREEMPT_DISABLED; \
+	per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
 } while (0)
 
 /*
@@ -57,17 +58,17 @@ static __always_inline void preempt_count_set(int pc)
 
 static __always_inline void set_preempt_need_resched(void)
 {
-	raw_cpu_and_4(pcpu_hot.preempt_count, ~PREEMPT_NEED_RESCHED);
+	raw_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
 }
 
 static __always_inline void clear_preempt_need_resched(void)
 {
-	raw_cpu_or_4(pcpu_hot.preempt_count, PREEMPT_NEED_RESCHED);
+	raw_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
 }
 
 static __always_inline bool test_preempt_need_resched(void)
 {
-	return !(raw_cpu_read_4(pcpu_hot.preempt_count) & PREEMPT_NEED_RESCHED);
+	return !(raw_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
 }
 
 /*
@@ -76,12 +77,12 @@ static __always_inline bool test_preempt_need_resched(void)
 
 static __always_inline void __preempt_count_add(int val)
 {
-	raw_cpu_add_4(pcpu_hot.preempt_count, val);
+	raw_cpu_add_4(__preempt_count, val);
 }
 
 static __always_inline void __preempt_count_sub(int val)
 {
-	raw_cpu_add_4(pcpu_hot.preempt_count, -val);
+	raw_cpu_add_4(__preempt_count, -val);
 }
 
 /*
@@ -91,7 +92,7 @@ static __always_inline void __preempt_count_sub(int val)
  */
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	return GEN_UNARY_RMWcc("decl", __my_cpu_var(pcpu_hot.preempt_count), e,
+	return GEN_UNARY_RMWcc("decl", __my_cpu_var(__preempt_count), e,
 			       __percpu_arg([var]));
 }
 
@@ -100,7 +101,7 @@ static __always_inline bool __preempt_count_dec_and_test(void)
  */
 static __always_inline bool should_resched(int preempt_offset)
 {
-	return unlikely(raw_cpu_read_4(pcpu_hot.preempt_count) == preempt_offset);
+	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
 }
 
 #ifdef CONFIG_PREEMPTION
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 8b49b1338f76..519e2ec2027d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2016,12 +2016,14 @@ __setup("clearcpuid=", setup_clearcpuid);
 
 DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = {
 	.current_task	= &init_task,
-	.preempt_count	= INIT_PREEMPT_COUNT,
 	.top_of_stack	= TOP_OF_INIT_STACK,
 };
 EXPORT_PER_CPU_SYMBOL(pcpu_hot);
 EXPORT_PER_CPU_SYMBOL(const_pcpu_hot);
 
+DEFINE_PER_CPU_HOT(int, __preempt_count) = INIT_PREEMPT_COUNT;
+EXPORT_PER_CPU_SYMBOL(__preempt_count);
+
 #ifdef CONFIG_X86_64
 static void wrmsrl_cstar(unsigned long val)
 {
--
2.48.1