Message-ID: <20181018005420.82993-6-namit@vmware.com>
Date: Wed, 17 Oct 2018 17:54:20 -0700
From: Nadav Amit <namit@...are.com>
To: Ingo Molnar <mingo@...hat.com>
CC: Andy Lutomirski <luto@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
"H . Peter Anvin " <hpa@...or.com>,
Thomas Gleixner <tglx@...utronix.de>,
<linux-kernel@...r.kernel.org>, Nadav Amit <nadav.amit@...il.com>,
<x86@...nel.org>, Borislav Petkov <bp@...en8.de>,
David Woodhouse <dwmw@...zon.co.uk>,
Nadav Amit <namit@...are.com>
Subject: [RFC PATCH 5/5] x86: relpoline: disabling interface

In certain cases it is beneficial not to use indirect branch promotion.
One such case is seccomp, which may hold multiple filters, and different
ones for different processes, so a promoted branch target would rarely
match. The interface introduced here tells the 'call' macro not to add
a relpoline to the indirect branch.

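For illustration, a minimal sketch of the intended usage pattern,
mirroring the seccomp change below:

	disable_relpolines();
	for (; f; f = f->prev) {
		u32 cur_ret = BPF_PROG_RUN(f->prog, sd);

		if (cur_ret < ret) {
			ret = cur_ret;
			*match = f;
		}
	}
	enable_relpolines();
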
Signed-off-by: Nadav Amit <namit@...are.com>
---
arch/x86/include/asm/nospec-branch.h | 25 +++++++++++++++++++++++++
kernel/seccomp.c | 2 ++
2 files changed, 27 insertions(+)
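
Note on the mechanism: the C helpers emit the assembler macro
invocations into the compiled output, and the overridden 'call' macro
tests the flag when the unit is assembled. The markers therefore cover
whatever indirect calls the compiler emits between them in the
translation unit's assembly stream; they do not create a dynamic
runtime scope. A sketch (my_dispatch(), ops and arg are hypothetical,
for illustration only):

	static int my_dispatch(int (**ops)(void *), int n, void *arg)
	{
		int i, ret = 0;

		disable_relpolines();	/* emits "disable_indirect_branch_opt" */
		for (i = 0; i < n; i++)
			ret |= ops[i](arg);	/* plain indirect call, no relpoline */
		enable_relpolines();	/* emits "enable_indirect_branch_opt" */
		return ret;
	}
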
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 360caad7a890..8b10e8165069 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -246,7 +246,21 @@
.endr
.endm

+.L_DISABLE_INDIRECT_BRANCH_OPT = 0
+
+.macro disable_indirect_branch_opt
+.L_DISABLE_INDIRECT_BRANCH_OPT = 1
+.endm
+
+.macro enable_indirect_branch_opt
+.L_DISABLE_INDIRECT_BRANCH_OPT = 0
+.endm
+
.macro call v:vararg
+.if .L_DISABLE_INDIRECT_BRANCH_OPT == 1
+ # The {disp8} pseudo-prefix keeps this call from re-expanding the macro
+ {disp8} call \v
+.else
retpoline = 0
.irp reg_it,ARCH_REG_NAMES
.ifc "\v", "__x86_indirect_thunk_\reg_it"
@@ -257,6 +271,7 @@
.if retpoline == 0
{disp8} call \v
.endif
+.endif
.endm

#else /* __ASSEMBLY__ */
@@ -409,6 +424,16 @@ struct relpoline_entry {
extern const void *indirect_thunks[16];
extern const void *save_relpoline_funcs[16];

+static inline void enable_relpolines(void)
+{
+ asm volatile("enable_indirect_branch_opt");
+}
+
+static inline void disable_relpolines(void)
+{
+ asm volatile("disable_indirect_branch_opt");
+}
+
/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index fd023ac24e10..c3fbeddfa8fa 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -207,6 +207,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
* All filters in the list are evaluated and the lowest BPF return
* value always takes priority (ignoring the DATA).
*/
+ disable_relpolines();
for (; f; f = f->prev) {
u32 cur_ret = BPF_PROG_RUN(f->prog, sd);

@@ -215,6 +216,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
*match = f;
}
}
+ enable_relpolines();
return ret;
}
#endif /* CONFIG_SECCOMP_FILTER */
--
2.17.1