Message-ID: <20250327205355.378659-3-mingo@kernel.org>
Date: Thu, 27 Mar 2025 21:53:15 +0100
From: Ingo Molnar <mingo@...nel.org>
To: linux-kernel@...r.kernel.org
Cc: Juergen Gross <jgross@...e.com>,
"H . Peter Anvin" <hpa@...or.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Peter Zijlstra <peterz@...radead.org>,
Borislav Petkov <bp@...en8.de>,
Thomas Gleixner <tglx@...utronix.de>
Subject: [PATCH 02/41] x86/alternatives: Rename 'bp_refs' to 'int3_refs'

Rename the 'bp_refs' per-CPU refcount to 'int3_refs': it protects the
INT3 based text-patching machinery, so name it after the mechanism
instead of the ambiguous 'bp' prefix.

Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 arch/x86/kernel/alternative.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 4e932e95c744..cb9ac69694fb 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -2476,14 +2476,14 @@ struct int3_patching_desc {
 	int nr_entries;
 };
 
-static DEFINE_PER_CPU(atomic_t, bp_refs);
+static DEFINE_PER_CPU(atomic_t, int3_refs);
 
 static struct int3_patching_desc bp_desc;
 
 static __always_inline
 struct int3_patching_desc *try_get_desc(void)
 {
-	atomic_t *refs = this_cpu_ptr(&bp_refs);
+	atomic_t *refs = this_cpu_ptr(&int3_refs);
 
 	if (!raw_atomic_inc_not_zero(refs))
 		return NULL;
@@ -2493,7 +2493,7 @@ struct int3_patching_desc *try_get_desc(void)
 
 static __always_inline void put_desc(void)
 {
-	atomic_t *refs = this_cpu_ptr(&bp_refs);
+	atomic_t *refs = this_cpu_ptr(&int3_refs);
 
 	smp_mb__before_atomic();
 	raw_atomic_dec(refs);
@@ -2529,9 +2529,9 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
 	 * Having observed our INT3 instruction, we now must observe
 	 * bp_desc with non-zero refcount:
 	 *
-	 *	bp_refs = 1		INT3
+	 *	int3_refs = 1		INT3
 	 *	WMB			RMB
-	 *	write INT3		if (bp_refs != 0)
+	 *	write INT3		if (int3_refs != 0)
 	 */
 	smp_rmb();
 
@@ -2638,7 +2638,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
 	 * ensure reading a non-zero refcount provides up to date bp_desc data.
 	 */
 	for_each_possible_cpu(i)
-		atomic_set_release(per_cpu_ptr(&bp_refs, i), 1);
+		atomic_set_release(per_cpu_ptr(&int3_refs, i), 1);
 
 	/*
 	 * Function tracing can enable thousands of places that need to be
@@ -2760,7 +2760,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
 	 * unused.
 	 */
 	for_each_possible_cpu(i) {
-		atomic_t *refs = per_cpu_ptr(&bp_refs, i);
+		atomic_t *refs = per_cpu_ptr(&int3_refs, i);
 
 		if (unlikely(!atomic_dec_and_test(refs)))
 			atomic_cond_read_acquire(refs, !VAL);
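
For reference, the ordering diagram in the poke_int3_handler() comment
above is the classic publish/observe handshake. Below is a minimal
user-space C11 sketch of the same pattern, illustrative only and not
part of the patch: all names (publisher/observer/patched/refs) are
made up, and acquire/release fences stand in for the kernel's
smp_wmb()/smp_rmb():

	/* sketch.c: illustration only, not kernel code */
	#include <assert.h>
	#include <stdatomic.h>

	static atomic_int refs;     /* stands in for int3_refs */
	static atomic_int patched;  /* stands in for the INT3 byte */

	static void publisher(void)
	{
		atomic_store_explicit(&refs, 1, memory_order_relaxed);    /* int3_refs = 1 */
		atomic_thread_fence(memory_order_release);                /* WMB */
		atomic_store_explicit(&patched, 1, memory_order_relaxed); /* write INT3 */
	}

	static void observer(void)
	{
		if (!atomic_load_explicit(&patched, memory_order_relaxed)) /* observed INT3? */
			return;
		atomic_thread_fence(memory_order_acquire);                 /* RMB */
		/* Having observed the patch, we must observe refs != 0: */
		assert(atomic_load_explicit(&refs, memory_order_relaxed) != 0);
	}

	int main(void)
	{
		/* In the kernel these run on different CPUs; sequential here. */
		publisher();
		observer();
		return 0;
	}

The fence-to-fence pairing gives the same guarantee the kernel relies
on: once the observer sees the patched byte, the earlier refcount
store is also visible.
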
--
2.45.2