Message-Id: <20200308222609.411187252@linutronix.de>
Date: Sun, 08 Mar 2020 23:24:04 +0100
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: x86@...nel.org, Steven Rostedt <rostedt@...dmis.org>,
Brian Gerst <brgerst@...il.com>,
Juergen Gross <jgross@...e.com>,
Frederic Weisbecker <frederic@...nel.org>,
Alexandre Chartre <alexandre.chartre@...cle.com>
Subject: [patch part-II V2 05/13] x86/entry/common: Consolidate syscall entry code

All syscall entry points call enter_from_user_mode() and local_irq_enable().
Move this into an inline helper so that interrupt tracing can later be
moved into the helper as well, instead of being sprinkled all over the
place.

Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Reviewed-by: Frederic Weisbecker <frederic@...nel.org>
Reviewed-by: Alexandre Chartre <alexandre.chartre@...cle.com>
---
V2: Rename helper to syscall_entry_apply_fixups()
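
In a nutshell, the consolidation looks like this (condensed from the diff
below, with do_syscall_64() as the representative caller):

	/* Common prologue: context tracking, then interrupt enable */
	static __always_inline void syscall_entry_apply_fixups(void)
	{
		enter_from_user_mode();
		local_irq_enable();
	}

	__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
	{
		syscall_entry_apply_fixups();
		do_syscall_64_irqs_on(nr, regs);
	}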
---
arch/x86/entry/common.c | 48 +++++++++++++++++++++++++++++++++---------------
1 file changed, 33 insertions(+), 15 deletions(-)

--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -50,6 +50,18 @@ NOKPROBE_SYMBOL(enter_from_user_mode);
static inline void enter_from_user_mode(void) {}
#endif

+/*
+ * All syscall entry variants call with interrupts disabled.
+ *
+ * Invoke context tracking if enabled and enable interrupts for further
+ * processing.
+ */
+static __always_inline void syscall_entry_apply_fixups(void)
+{
+ enter_from_user_mode();
+ local_irq_enable();
+}
+
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
@@ -280,13 +292,11 @@ static void syscall_slow_exit_work(struc
}

#ifdef CONFIG_X86_64
-__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
+static __always_inline
+void do_syscall_64_irqs_on(unsigned long nr, struct pt_regs *regs)
{
- struct thread_info *ti;
+ struct thread_info *ti = current_thread_info();

- enter_from_user_mode();
- local_irq_enable();
-
- ti = current_thread_info();
if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
nr = syscall_trace_enter(regs);
@@ -304,6 +314,12 @@ static void syscall_slow_exit_work(struc
syscall_return_slowpath(regs);
}
+
+__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
+{
+ syscall_entry_apply_fixups();
+ do_syscall_64_irqs_on(nr, regs);
+}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
@@ -356,19 +372,17 @@ static __always_inline void do_syscall_3
/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
- enter_from_user_mode();
- local_irq_enable();
+ syscall_entry_apply_fixups();
do_syscall_32_irqs_on(regs);
}

-/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
-__visible long do_fast_syscall_32(struct pt_regs *regs)
+/* Fast syscall 32bit variant */
+static __always_inline long do_fast_syscall_32_irqs_on(struct pt_regs *regs)
{
/*
* Called using the internal vDSO SYSENTER/SYSCALL32 calling
* convention. Adjust regs so it looks like we entered using int80.
*/
-
unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
vdso_image_32.sym_int80_landing_pad;
@@ -379,10 +393,6 @@ static __always_inline void do_syscall_3
*/
regs->ip = landing_pad;

- enter_from_user_mode();
-
- local_irq_enable();
-
/* Fetch EBP from where the vDSO stashed it. */
if (
#ifdef CONFIG_X86_64
@@ -438,4 +448,12 @@ static __always_inline void do_syscall_3
(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
-#endif
+
+/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
+__visible long do_fast_syscall_32(struct pt_regs *regs)
+{
+ syscall_entry_apply_fixups();
+ return do_fast_syscall_32_irqs_on(regs);
+}
+
+#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */