It's worth avoiding the extra irq_enter()/irq_exit() pair in the case that
the reschedule interrupt tracepoints are disabled.

Use the static key which indicates that exception tracing is enabled. For
now this key is global. It will be optimized in a later step.

Signed-off-by: Thomas Gleixner
---
 arch/x86/include/asm/hw_irq.h |    2 +-
 arch/x86/kernel/smp.c         |   40 ++++++++++++++++------------------------
 2 files changed, 17 insertions(+), 25 deletions(-)

--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -48,7 +48,7 @@ extern asmlinkage void call_function_sin
 
 #ifdef CONFIG_TRACING
 /* Interrupt handlers registered during init_IRQ */
-extern void trace_reschedule_interrupt(void);
+#define trace_reschedule_interrupt reschedule_interrupt
 #define trace_call_function_interrupt call_function_interrupt
 #define trace_call_function_single_interrupt call_function_single_interrupt
 #define trace_thermal_interrupt thermal_interrupt
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -254,37 +254,29 @@ static void native_stop_other_cpus(int w
 }
 
 /*
- * Reschedule call back.
+ * Reschedule call back. KVM uses this interrupt to force a cpu out of
+ * guest mode
  */
-static inline void __smp_reschedule_interrupt(void)
-{
-	inc_irq_stat(irq_resched_count);
-	scheduler_ipi();
-}
-
 __visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
 {
 	ack_APIC_irq();
-	__smp_reschedule_interrupt();
-	/*
-	 * KVM uses this interrupt to force a cpu out of guest mode
-	 */
-}
-__visible void __irq_entry smp_trace_reschedule_interrupt(struct pt_regs *regs)
-{
-	/*
-	 * Need to call irq_enter() before calling the trace point.
-	 * __smp_reschedule_interrupt() calls irq_enter/exit() too (in
-	 * scheduler_ipi(). This is OK, since those functions are allowed
-	 * to nest.
-	 */
-	ipi_entering_ack_irq();
-	trace_reschedule_entry(RESCHEDULE_VECTOR);
+	if (trace_irqvectors_enabled()) {
+		/*
+		 * scheduler_ipi() might call irq_enter() as well, but
+		 * nested calls are fine.
+		 */
+		irq_enter();
+		trace_reschedule_entry(RESCHEDULE_VECTOR);
+	}
+	inc_irq_stat(irq_resched_count);
 	scheduler_ipi();
-	trace_reschedule_exit(RESCHEDULE_VECTOR);
-	exiting_irq();
+
+	if (trace_irqvectors_enabled()) {
+		trace_reschedule_exit(RESCHEDULE_VECTOR);
+		irq_exit();
+	}
 }
 
 __visible void __irq_entry smp_call_function_interrupt(struct pt_regs *regs)
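
A note on the helper used above: trace_irqvectors_enabled() is the static-key
test introduced earlier in this series; its definition is not part of this
patch. As a rough, illustrative sketch of how such a global key is typically
wired up with the generic static key API - the key name and the tracepoint
reg/unreg hook names below are assumptions, only trace_irqvectors_enabled()
itself appears in this patch - it would look something like:

	/* Illustrative sketch only -- not part of this patch. The key starts
	 * off false, so trace_irqvectors_enabled() compiles down to a
	 * patched-out branch while no irq-vector tracepoint is registered.
	 */
	#include <linux/jump_label.h>

	DEFINE_STATIC_KEY_FALSE(trace_irqvectors_key);

	static __always_inline bool trace_irqvectors_enabled(void)
	{
		return static_branch_unlikely(&trace_irqvectors_key);
	}

	/* Hypothetical tracepoint registration hooks: enabling any irq
	 * vector tracepoint increments the key, disabling the last one
	 * drops it back to false and the branches become NOPs again.
	 */
	int trace_irq_vector_regfunc(void)
	{
		static_branch_inc(&trace_irqvectors_key);
		return 0;
	}

	void trace_irq_vector_unregfunc(void)
	{
		static_branch_dec(&trace_irqvectors_key);
	}

The point of static_branch_unlikely() over a plain global flag is that the
tracing-disabled fast path in smp_reschedule_interrupt() costs a patched NOP
rather than a load plus conditional branch.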