Message-ID: <d9f8753c-7e12-a81b-37e8-e7345f2d0150@oracle.com>
Date: Thu, 7 May 2020 16:15:41 +0200
From: Alexandre Chartre <alexandre.chartre@...cle.com>
To: Thomas Gleixner <tglx@...utronix.de>,
LKML <linux-kernel@...r.kernel.org>
Cc: x86@...nel.org, "Paul E. McKenney" <paulmck@...nel.org>,
Andy Lutomirski <luto@...nel.org>,
Frederic Weisbecker <frederic@...nel.org>,
Paolo Bonzini <pbonzini@...hat.com>,
Sean Christopherson <sean.j.christopherson@...el.com>,
Masami Hiramatsu <mhiramat@...nel.org>,
Petr Mladek <pmladek@...e.com>,
Steven Rostedt <rostedt@...dmis.org>,
Joel Fernandes <joel@...lfernandes.org>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
Juergen Gross <jgross@...e.com>,
Brian Gerst <brgerst@...il.com>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Josh Poimboeuf <jpoimboe@...hat.com>,
Will Deacon <will@...nel.org>
Subject: Re: [patch V4 part 2 10/18] x86/entry/64: Check IF in
__preempt_enable_notrace() thunk
On 5/5/20 3:41 PM, Thomas Gleixner wrote:
> The preempt_enable_notrace() ASM thunk is called from tracing, entry code,
> RCU and other places which are already in or going to be in the noinstr
> section, which protects sensitve code from being instrumented.
typo: "sensitve"
alex.
> Calls out of these sections happen with interrupts disabled, which is
> handled in C code, but the push regs, call, pop regs sequence can be
> completely avoided in this case.
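
For reference, the early-return logic that the new check_if thunk variant
implements corresponds roughly to this C sketch (illustrative only, not
part of the patch; native_save_fl() and the direct call stand in for the
actual assembly):

	/*
	 * Sketch only: with interrupts disabled (IF clear), preemption
	 * cannot be scheduled here anyway, so the push regs / call /
	 * pop regs sequence is skipped entirely.
	 */
	if (!(native_save_fl() & X86_EFLAGS_IF))
		return;				/* IF clear: nothing to do */
	preempt_schedule_notrace();		/* IF set: take the real path */
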
>
> This is also a preparatory step for annotating the call from the thunk to
> preempt_enable_notrace() as safe from a noinstr section.
>
> Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
> ---
> arch/x86/entry/thunk_64.S | 27 +++++++++++++++++++++++----
> arch/x86/include/asm/irqflags.h | 3 +--
> arch/x86/include/asm/paravirt.h | 3 +--
> 3 files changed, 25 insertions(+), 8 deletions(-)
>
> --- a/arch/x86/entry/thunk_64.S
> +++ b/arch/x86/entry/thunk_64.S
> @@ -9,10 +9,28 @@
> #include "calling.h"
> #include <asm/asm.h>
> #include <asm/export.h>
> +#include <asm/irqflags.h>
> +
> +.code64
>
> /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
> - .macro THUNK name, func, put_ret_addr_in_rdi=0
> + .macro THUNK name, func, put_ret_addr_in_rdi=0, check_if=0
> SYM_FUNC_START_NOALIGN(\name)
> +
> + .if \check_if
> + /*
> + * Check for interrupts disabled right here. No point in
> + * going all the way down
> + */
> + pushq %rax
> + SAVE_FLAGS(CLBR_RAX)
> + testl $X86_EFLAGS_IF, %eax
> + popq %rax
> + jnz 1f
> + ret
> +1:
> + .endif
> +
> pushq %rbp
> movq %rsp, %rbp
>
> @@ -38,14 +56,15 @@ SYM_FUNC_END(\name)
> .endm
>
> #ifdef CONFIG_TRACE_IRQFLAGS
> - THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
> - THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
> + THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller, put_ret_addr_in_rdi=1
> + THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller, put_ret_addr_in_rdi=1
> #endif
>
> #ifdef CONFIG_PREEMPTION
> THUNK preempt_schedule_thunk, preempt_schedule
> - THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
> EXPORT_SYMBOL(preempt_schedule_thunk)
> +
> + THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace, check_if=1
> EXPORT_SYMBOL(preempt_schedule_notrace_thunk)
> #endif
>
> --- a/arch/x86/include/asm/irqflags.h
> +++ b/arch/x86/include/asm/irqflags.h
> @@ -127,9 +127,8 @@ static inline notrace unsigned long arch
> #define DISABLE_INTERRUPTS(x) cli
>
> #ifdef CONFIG_X86_64
> -#ifdef CONFIG_DEBUG_ENTRY
> +
> #define SAVE_FLAGS(x) pushfq; popq %rax
> -#endif
>
> #define SWAPGS swapgs
> /*
> --- a/arch/x86/include/asm/paravirt.h
> +++ b/arch/x86/include/asm/paravirt.h
> @@ -907,14 +907,13 @@ extern void default_banner(void);
> ANNOTATE_RETPOLINE_SAFE; \
> jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)
>
> -#ifdef CONFIG_DEBUG_ENTRY
> #define SAVE_FLAGS(clobbers) \
> PARA_SITE(PARA_PATCH(PV_IRQ_save_fl), \
> PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
> ANNOTATE_RETPOLINE_SAFE; \
> call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl); \
> PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
> -#endif
> +
> #endif /* CONFIG_PARAVIRT_XXL */
> #endif /* CONFIG_X86_64 */
>
>
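For context, the thunk is reached via preempt_enable_notrace(); a sketch of
the caller side (paraphrased from include/linux/preempt.h, CONFIG_PREEMPTION
case) shows why every such call otherwise pays the full save/call/restore:

	/* Paraphrased caller; __preempt_schedule_notrace() invokes the thunk on x86. */
	#define preempt_enable_notrace() \
	do { \
		barrier(); \
		if (unlikely(__preempt_count_dec_and_test())) \
			__preempt_schedule_notrace(); \
	} while (0)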