Message-ID: <87r12iurej.ffs@tglx>
Date: Mon, 18 Jul 2022 21:55:48 +0200
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: x86@...nel.org, Linus Torvalds <torvalds@...ux-foundation.org>,
Tim Chen <tim.c.chen@...ux.intel.com>,
Josh Poimboeuf <jpoimboe@...nel.org>,
Andrew Cooper <Andrew.Cooper3@...rix.com>,
Pawan Gupta <pawan.kumar.gupta@...ux.intel.com>,
Johannes Wikner <kwikner@...z.ch>,
Alyssa Milburn <alyssa.milburn@...ux.intel.com>,
Jann Horn <jannh@...gle.com>, "H.J. Lu" <hjl.tools@...il.com>,
Joao Moreira <joao.moreira@...el.com>,
Joseph Nuzman <joseph.nuzman@...el.com>,
Steven Rostedt <rostedt@...dmis.org>,
Juergen Gross <jgross@...e.com>,
"Peter Zijlstra (Intel)" <peterz@...radead.org>,
Masami Hiramatsu <mhiramat@...nel.org>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>
Subject: Re: [patch 00/38] x86/retbleed: Call depth tracking mitigation
On Sun, Jul 17 2022 at 01:17, Thomas Gleixner wrote:
> For 4 RET paths randomized with randomize_kstack_offset=y and RSP bits 3, 6, 5:
>
>                        IBRS      stuff     stuff(pad)  confuse
> microbench:            +37.20%   +18.46%   +15.47%     +7.46%
> sockperf 14 bytes:     -23.76%   -19.26%   -14.31%     -16.80%
> sockperf 1472 bytes:   -22.51%   -18.40%   -12.25%     -15.95%
>
> So for the more randomized variant sockperf tanks and is already slower
> than stuffing with thunks in the compiler-provided padding space.
>
> I sent out a patch in reply to this series which implements that variant,
> but there needs to be input from the security researchers on how protective
> this is. If we could get away with 2 RET paths (perhaps multiple instances
> with different bits), that would be amazing.
Here it goes.
---
Subject: x86/retbleed: Add confusion mitigation
From: Thomas Gleixner <tglx@...utronix.de>
Date: Fri, 15 Jul 2022 11:41:05 +0200
- NOT FOR INCLUSION -
Experimental option to confuse the return path via randomization.
The following command line options enable this:
retbleed=confuse     4 return paths
retbleed=confuse,4   4 return paths
retbleed=confuse,3   3 return paths
retbleed=confuse,2   2 return paths
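As an illustration (not part of the patch itself), the two-path variant
would be selected on an affected system with a boot line along the lines
of:
retbleed=confuse,2 randomize_kstack_offset=on spectre_v2=retpoline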
This needs scrutiny by security researchers.
Not-Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
arch/x86/Kconfig | 12 ++++++
arch/x86/include/asm/nospec-branch.h | 23 +++++++++++
arch/x86/kernel/cpu/bugs.c | 41 +++++++++++++++++++++
arch/x86/lib/retpoline.S | 68 +++++++++++++++++++++++++++++++++++
include/linux/randomize_kstack.h | 6 +++
kernel/entry/common.c | 3 +
6 files changed, 153 insertions(+)
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2538,6 +2538,18 @@ config CALL_THUNKS_DEBUG
Only enable this, when you are debugging call thunks as this
creates a noticeable runtime overhead. If unsure say N.
+config RETURN_CONFUSION
+ bool "Mitigate RSB underflow with return confusion"
+ depends on CPU_SUP_INTEL && RETHUNK && RANDOMIZE_KSTACK_OFFSET
+ default y
+ help
+ Compile the kernel with return path confusion to mitigate the
+ Intel SKL Return-Stack-Buffer (RSB) underflow issue. The
+ mitigation is off by default and needs to be enabled on the
+ kernel command line via the retbleed=confuse option. For
+ non-affected systems the overhead of this option is marginal as
+ the return thunk jumps are patched to direct ret instructions.
+
config CPU_IBPB_ENTRY
bool "Enable IBPB on kernel entry"
depends on CPU_SUP_AMD
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -312,6 +312,29 @@ static inline void x86_set_skl_return_th
#endif
+#ifdef CONFIG_RETURN_CONFUSION
+extern void __x86_return_confused_skl2(void);
+extern void __x86_return_confused_skl3(void);
+extern void __x86_return_confused_skl4(void);
+
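+/* Select the confused return thunk with the requested number of return paths */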
+static inline void x86_set_skl_confused_return_thunk(int which)
+{
+ switch (which) {
+ case 2:
+ x86_return_thunk = &__x86_return_confused_skl2;
+ break;
+ case 3:
+ x86_return_thunk = &__x86_return_confused_skl3;
+ break;
+ case 4:
+ x86_return_thunk = &__x86_return_confused_skl4;
+ break;
+ }
+}
+#else
+static inline void x86_set_skl_confused_return_thunk(int which) { }
+#endif
+
#ifdef CONFIG_RETPOLINE
#define GEN(reg) \
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
+#include <linux/randomize_kstack.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>
#include <linux/bpf.h>
@@ -785,6 +786,7 @@ enum retbleed_mitigation {
RETBLEED_MITIGATION_IBRS,
RETBLEED_MITIGATION_EIBRS,
RETBLEED_MITIGATION_STUFF,
+ RETBLEED_MITIGATION_CONFUSE,
};
enum retbleed_mitigation_cmd {
@@ -793,6 +795,7 @@ enum retbleed_mitigation_cmd {
RETBLEED_CMD_UNRET,
RETBLEED_CMD_IBPB,
RETBLEED_CMD_STUFF,
+ RETBLEED_CMD_CONFUSE,
};
const char * const retbleed_strings[] = {
@@ -802,6 +805,7 @@ const char * const retbleed_strings[] =
[RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
[RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
[RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing",
+ [RETBLEED_MITIGATION_CONFUSE] = "Mitigation: Return confusion",
};
static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
@@ -810,6 +814,7 @@ static enum retbleed_mitigation_cmd retb
RETBLEED_CMD_AUTO;
static int __ro_after_init retbleed_nosmt = false;
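+/* Number of confused return paths, set via retbleed=confuse,<n> */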
+static unsigned int __ro_after_init rethunk_confuse_skl = 4;
static int __init retbleed_parse_cmdline(char *str)
{
@@ -833,8 +838,19 @@ static int __init retbleed_parse_cmdline
retbleed_cmd = RETBLEED_CMD_IBPB;
} else if (!strcmp(str, "stuff")) {
retbleed_cmd = RETBLEED_CMD_STUFF;
+ } else if (!strcmp(str, "confuse")) {
+ retbleed_cmd = RETBLEED_CMD_CONFUSE;
} else if (!strcmp(str, "nosmt")) {
retbleed_nosmt = true;
+ } else if (retbleed_cmd == RETBLEED_CMD_CONFUSE &&
+ !kstrtouint(str, 10, &rethunk_confuse_skl)) {
+
+ if (rethunk_confuse_skl < 2 ||
+ rethunk_confuse_skl > 4) {
+ pr_err("Ignoring out-of-bound stuff count (%d).",
+ rethunk_confuse_skl);
+ rethunk_confuse_skl = 4;
+ }
} else {
pr_err("Ignoring unknown retbleed option (%s).", str);
}
@@ -896,6 +912,25 @@ static void __init retbleed_select_mitig
}
break;
+ case RETBLEED_CMD_CONFUSE:
+ if (IS_ENABLED(CONFIG_RETURN_CONFUSION) &&
+ spectre_v2_enabled == SPECTRE_V2_RETPOLINE &&
+ random_kstack_offset_enabled()) {
+ retbleed_mitigation = RETBLEED_MITIGATION_CONFUSE;
+ } else {
+ if (IS_ENABLED(CONFIG_RETURN_CONFUSION) &&
+ !random_kstack_offset_enabled())
+ pr_err("WARNING: retbleed=confuse depends on randomize_kstack_offset=y\n");
+ else if (IS_ENABLED(CONFIG_RETURN_CONFUSION) &&
+ spectre_v2_enabled != SPECTRE_V2_RETPOLINE)
+ pr_err("WARNING: retbleed=confuse depends on spectre_v2=retpoline\n");
+ else
+ pr_err("WARNING: kernel not compiled with RETURN_CONFUSION.\n");
+
+ goto do_cmd_auto;
+ }
+ break;
+
do_cmd_auto:
case RETBLEED_CMD_AUTO:
default:
@@ -939,6 +974,11 @@ static void __init retbleed_select_mitig
x86_set_skl_return_thunk();
break;
+ case RETBLEED_MITIGATION_CONFUSE:
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ x86_set_skl_confused_return_thunk(rethunk_confuse_skl);
+ break;
+
default:
break;
}
@@ -1389,6 +1429,7 @@ static void __init spectre_v2_select_mit
boot_cpu_has_bug(X86_BUG_RETBLEED) &&
retbleed_cmd != RETBLEED_CMD_OFF &&
retbleed_cmd != RETBLEED_CMD_STUFF &&
+ retbleed_cmd != RETBLEED_CMD_CONFUSE &&
boot_cpu_has(X86_FEATURE_IBRS) &&
boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
mode = SPECTRE_V2_IBRS;
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -230,3 +230,71 @@ SYM_FUNC_START(__x86_return_skl)
SYM_FUNC_END(__x86_return_skl)
#endif /* CONFIG_CALL_DEPTH_TRACKING */
+
+#ifdef CONFIG_RETURN_CONFUSION
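+/*
+ * Return thunks which pick one of N identical return paths, keyed on
+ * RSP bits which are randomized by randomize_kstack_offset.
+ */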
+ .align 64
+SYM_FUNC_START(__x86_return_confused_skl4)
+ ANNOTATE_NOENDBR
+ testq $8, %rsp /* RSP bit 3 */
+ jz 1f
+
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+1:
+ testq $0x40, %rsp /* RSP bit 6 */
+ jz 2f
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+2:
+ testq $0x20, %rsp /* RSP bit 5 */
+ jz 3f
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+3:
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+SYM_FUNC_END(__x86_return_confused_skl4)
+
+ .align 64
+SYM_FUNC_START(__x86_return_confused_skl3)
+ ANNOTATE_NOENDBR
+ testq $8, %rsp /* RSP bit 3 */
+ jz 1f
+
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+1:
+ testq $0x40, %rsp /* RSP bit 6 */
+ jz 2f
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+2:
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+SYM_FUNC_END(__x86_return_confused_skl3)
+
+ .align 64
+SYM_FUNC_START(__x86_return_confused_skl2)
+ ANNOTATE_NOENDBR
+ testq $8, %rsp /* RSP bit 3 */
+ jz 1f
+
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+1:
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+SYM_FUNC_END(__x86_return_confused_skl2)
+
+#endif /* CONFIG_RETURN_CONFUSION */
--- a/include/linux/randomize_kstack.h
+++ b/include/linux/randomize_kstack.h
@@ -84,9 +84,15 @@ DECLARE_PER_CPU(u32, kstack_offset);
raw_cpu_write(kstack_offset, offset); \
} \
} while (0)
+
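+/* True when the kstack offset randomization static branch is enabled */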
+#define random_kstack_offset_enabled() \
+ static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
+ &randomize_kstack_offset)
+
#else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
#define add_random_kstack_offset() do { } while (0)
#define choose_random_kstack_offset(rand) do { } while (0)
+#define random_kstack_offset_enabled() false
#endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
#endif
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -298,6 +298,7 @@ void syscall_exit_to_user_mode_work(stru
noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
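+ /* Also randomize the kstack offset on interrupt entry from user space */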
+ add_random_kstack_offset();
__enter_from_user_mode(regs);
}
@@ -444,6 +445,8 @@ irqentry_state_t noinstr irqentry_nmi_en
{
irqentry_state_t irq_state;
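+ /* NMIs can arrive in user mode as well; randomize the offset then too */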
+ if (user_mode(regs))
+ add_random_kstack_offset();
irq_state.lockdep = lockdep_hardirqs_enabled();
__nmi_enter();