Message-Id: <20210609122001.18277-10-mark.rutland@arm.com>
Date: Wed, 9 Jun 2021 13:20:00 +0100
From: Mark Rutland <mark.rutland@....com>
To: linux-kernel@...r.kernel.org
Cc: benh@...nel.crashing.org, boqun.feng@...il.com, bp@...en8.de,
catalin.marinas@....com, dvyukov@...gle.com, elver@...gle.com,
ink@...assic.park.msu.ru, jonas@...thpole.se,
juri.lelli@...hat.com, linux@...linux.org.uk, luto@...nel.org,
mark.rutland@....com, mattst88@...il.com, mingo@...hat.com,
monstr@...str.eu, mpe@...erman.id.au, paulmck@...nel.org,
paulus@...ba.org, peterz@...radead.org, rth@...ddle.net,
shorne@...il.com, stefan.kristiansson@...nalahti.fi,
tglx@...utronix.de, vincent.guittot@...aro.org, will@...nel.org
Subject: [RFC PATCH 09/10] powerpc: snapshot thread flags

Some thread flags can be set remotely, and so even when IRQs are
disabled, the flags can change under our feet. Generally this is
unlikely to cause a problem in practice, but it is somewhat unsound, and
KCSAN will legitimately warn that there is a data race.

To avoid such issues, we should snapshot the flags prior to using them.
Let's use the new helpers to do so on powerpc.
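
The helper itself is introduced earlier in this series; the idea is to
read the flags word once and test bits against that snapshot, rather
than re-reading current_thread_info()->flags at each check. As a rough
sketch (assuming the generic helper is simply a READ_ONCE() wrapper;
the exact definition lives in the earlier patch):

static inline unsigned long read_thread_flags(void)
{
        /* Take one snapshot; remote writers may update the flags concurrently. */
        return READ_ONCE(current_thread_info()->flags);
}

Callers then test bits against that single snapshot (as in the hunks
below), so a concurrent remote update is observed at most once per
snapshot rather than racing with each individual test.
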
Signed-off-by: Mark Rutland <mark.rutland@....com>
Cc: Benjamin Herrenschmidt <benh@...nel.crashing.org>
Cc: Michael Ellerman <mpe@...erman.id.au>
Cc: Paul Mackerras <paulus@...ba.org>
---
arch/powerpc/kernel/interrupt.c | 16 ++++++++--------
arch/powerpc/kernel/ptrace/ptrace.c | 3 +--
2 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index e0938ba298f2..f6150a4b3d1b 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -92,7 +92,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
local_irq_enable();
- if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
+ if (unlikely(read_thread_flags() & _TIF_SYSCALL_DOTRACE)) {
if (unlikely(trap_is_unsupported_scv(regs))) {
/* Unsupported scv vector */
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
@@ -257,7 +257,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
/* Check whether the syscall is issued inside a restartable sequence */
rseq_syscall(regs);
- ti_flags = current_thread_info()->flags;
+ ti_flags = read_thread_flags();
if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
@@ -284,7 +284,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
local_irq_disable();
again:
- ti_flags = READ_ONCE(current_thread_info()->flags);
+ ti_flags = read_thread_flags();
while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
local_irq_enable();
if (ti_flags & _TIF_NEED_RESCHED) {
@@ -300,7 +300,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
do_notify_resume(regs, ti_flags);
}
local_irq_disable();
- ti_flags = READ_ONCE(current_thread_info()->flags);
+ ti_flags = read_thread_flags();
}
if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
@@ -373,7 +373,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
local_irq_save(flags);
again:
- ti_flags = READ_ONCE(current_thread_info()->flags);
+ ti_flags = read_thread_flags();
while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
local_irq_enable(); /* returning to user: may enable */
if (ti_flags & _TIF_NEED_RESCHED) {
@@ -384,7 +384,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
do_notify_resume(regs, ti_flags);
}
local_irq_disable();
- ti_flags = READ_ONCE(current_thread_info()->flags);
+ ti_flags = read_thread_flags();
}
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
@@ -450,7 +450,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
kuap = kuap_get_and_assert_locked();
- if (unlikely(current_thread_info()->flags & _TIF_EMULATE_STACK_STORE)) {
+ if (unlikely(read_thread_flags() & _TIF_EMULATE_STACK_STORE)) {
clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
ret = 1;
}
@@ -463,7 +463,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
again:
if (IS_ENABLED(CONFIG_PREEMPT)) {
/* Return to preemptible kernel context */
- if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED)) {
+ if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
if (preempt_count() == 0)
preempt_schedule_irq();
}
diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c
index 0a0a33eb0d28..d174570a144e 100644
--- a/arch/powerpc/kernel/ptrace/ptrace.c
+++ b/arch/powerpc/kernel/ptrace/ptrace.c
@@ -260,8 +260,7 @@ long do_syscall_trace_enter(struct pt_regs *regs)
{
u32 flags;
- flags = READ_ONCE(current_thread_info()->flags) &
- (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);
+ flags = read_thread_flags() & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);
if (flags) {
int rc = tracehook_report_syscall_entry(regs);
--
2.11.0