Message-Id: <20210914103027.53565-11-mark.rutland@arm.com>
Date: Tue, 14 Sep 2021 11:30:27 +0100
From: Mark Rutland <mark.rutland@....com>
To: linux-kernel@...r.kernel.org
Cc: benh@...nel.crashing.org, boqun.feng@...il.com, bp@...en8.de,
catalin.marinas@....com, dvyukov@...gle.com, elver@...gle.com,
ink@...assic.park.msu.ru, jonas@...thpole.se,
juri.lelli@...hat.com, linux@...linux.org.uk, luto@...nel.org,
mark.rutland@....com, mattst88@...il.com, michal.simek@...inx.com,
mingo@...hat.com, mpe@...erman.id.au, paulmck@...nel.org,
paulus@...ba.org, peterz@...radead.org, rth@...ddle.net,
shorne@...il.com, stefan.kristiansson@...nalahti.fi,
tglx@...utronix.de, vincent.guittot@...aro.org, will@...nel.org
Subject: [PATCH v5 10/10] x86: snapshot thread flags
Some thread flags can be set remotely, and so even when IRQs are
disabled, the flags can change under our feet. Generally this is
unlikely to cause a problem in practice, but it is somewhat unsound, and
KCSAN will legitimately warn that there is a data race.
To avoid such issues, a snapshot of the flags has to be taken prior to
using them. Some places already use READ_ONCE() for that, others do not.
Convert them all to the new flag accessor helpers.
Signed-off-by: Mark Rutland <mark.rutland@....com>
Reviewed-by: Thomas Gleixner <tglx@...utronix.de>
Acked-by: Paul E. McKenney <paulmck@...nel.org>
Cc: Borislav Petkov <bp@...en8.de>
Cc: Ingo Molnar <mingo@...hat.com>
---
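
For anyone reading this patch in isolation: the accessors used below are the
ones introduced earlier in this series. Roughly speaking, they boil down to a
single marked read of thread_info::flags, along these lines (a sketch of the
helpers for reference, not part of this diff):

static inline unsigned long read_ti_thread_flags(struct thread_info *ti)
{
	/* One marked load; remote writers may flip bits concurrently. */
	return READ_ONCE(ti->flags);
}

#define read_thread_flags() \
	read_ti_thread_flags(current_thread_info())

#define read_task_thread_flags(t) \
	read_ti_thread_flags(task_thread_info(t))

So e.g. read_task_thread_flags(next_p) below is equivalent to the existing
READ_ONCE(task_thread_info(next_p)->flags), just spelled via a common helper,
which is why KCSAN no longer flags the plain reads converted here.
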
 arch/x86/kernel/process.c | 8 ++++----
 arch/x86/kernel/process.h | 6 +++---
 arch/x86/mm/tlb.c         | 2 +-
 3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 1d9463e3096b..0b9a1f2ccfb3 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -348,7 +348,7 @@ void arch_setup_new_exec(void)
 		clear_thread_flag(TIF_SSBD);
 		task_clear_spec_ssb_disable(current);
 		task_clear_spec_ssb_noexec(current);
-		speculation_ctrl_update(task_thread_info(current)->flags);
+		speculation_ctrl_update(read_thread_flags());
 	}
 }
 
@@ -600,7 +600,7 @@ static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
 			clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
 	}
 	/* Return the updated threadinfo flags*/
-	return task_thread_info(tsk)->flags;
+	return read_task_thread_flags(tsk);
 }
 
 void speculation_ctrl_update(unsigned long tif)
@@ -636,8 +636,8 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
 {
 	unsigned long tifp, tifn;
 
-	tifn = READ_ONCE(task_thread_info(next_p)->flags);
-	tifp = READ_ONCE(task_thread_info(prev_p)->flags);
+	tifn = read_task_thread_flags(next_p);
+	tifp = read_task_thread_flags(prev_p);
 
 	switch_to_bitmap(tifp);
 
diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h
index 1d0797b2338a..0b1be8685b49 100644
--- a/arch/x86/kernel/process.h
+++ b/arch/x86/kernel/process.h
@@ -13,9 +13,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p);
 static inline void switch_to_extra(struct task_struct *prev,
 				   struct task_struct *next)
 {
-	unsigned long next_tif = task_thread_info(next)->flags;
-	unsigned long prev_tif = task_thread_info(prev)->flags;
-
+	unsigned long next_tif = read_task_thread_flags(next);
+	unsigned long prev_tif = read_task_thread_flags(prev);
+
 	if (IS_ENABLED(CONFIG_SMP)) {
 		/*
 		 * Avoid __switch_to_xtra() invocation when conditional
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 59ba2968af1b..92bb03b9ceb5 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -361,7 +361,7 @@ static void l1d_flush_evaluate(unsigned long prev_mm, unsigned long next_mm,
 
 static unsigned long mm_mangle_tif_spec_bits(struct task_struct *next)
 {
-	unsigned long next_tif = task_thread_info(next)->flags;
+	unsigned long next_tif = read_task_thread_flags(next);
 	unsigned long spec_bits = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_SPEC_MASK;
 
 	/*
--
2.11.0