Date:   Tue,  9 Jul 2019 16:52:13 +0530
From:   Neeraj Upadhyay <neeraju@...eaurora.org>
To:     will@...nel.org, mark.rutland@....com, marc.zyngier@....com,
        julien.thierry@....com, tglx@...utronix.de
Cc:     linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
        linux-arm-msm@...r.kernel.org, gkohli@...eaurora.org,
        parthd@...eaurora.org, Neeraj Upadhyay <neeraju@...eaurora.org>
Subject: [PATCH] arm64: Explicitly set pstate.ssbs for el0 on kernel entry

For cpus which do not support the pstate.ssbs feature, el0 might
not retain spsr.ssbs. This is problematic if the task later
migrates to a cpu which does support the feature and relies on
that state being correct. On kernel entry, explicitly set
spsr.ssbs, so that speculation is enabled for el0 when the task
migrates to a cpu supporting the ssbs feature. Restoring the
state on kernel entry ensures that the el0 ssbs state is always
consistent while we are in el1.
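
In C terms, the decision the new entry macro makes about the saved
el0 pstate is roughly the following user-space sketch; the PSR_*
constants mirror arch/arm64/include/uapi/asm/ptrace.h, while
restore_el0_ssbs() and ssbd_set are illustrative stand-ins for the
macro and the TIF_SSBD check, not part of this patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PSR_MODE32_BIT     0x00000010ULL  /* AArch32 (compat) task    */
#define PSR_SSBS_BIT       0x00001000ULL  /* SSBS in the AArch64 spsr */
#define PSR_AA32_SSBS_BIT  0x00800000ULL  /* SSBS in the AArch32 spsr */

/*
 * Re-enable speculation in the saved el0 state unless the task has
 * requested SSBD mitigation (modelled here by the ssbd_set flag,
 * standing in for the TIF_SSBD test in the entry macro).
 */
static uint64_t restore_el0_ssbs(uint64_t saved_spsr, bool ssbd_set)
{
	if (ssbd_set)
		return saved_spsr;	/* leave SSBS clear: mitigation stays on */

	if (saved_spsr & PSR_MODE32_BIT)
		return saved_spsr | PSR_AA32_SSBS_BIT;	/* compat task */

	return saved_spsr | PSR_SSBS_BIT;		/* native task */
}

int main(void)
{
	/* Native el0 task, no per-task mitigation: bit 12 (SSBS) is set. */
	printf("0x%llx\n", (unsigned long long)restore_el0_ssbs(0x0, false));
	return 0;
}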

As alternatives are applied by the boot cpu at the end of smp
init, the presence or absence of the ssbs feature on the boot cpu
is used to decide whether the capability is uniformly provided.

Signed-off-by: Neeraj Upadhyay <neeraju@...eaurora.org>
---
 arch/arm64/kernel/cpu_errata.c | 16 ++++++++++++++++
 arch/arm64/kernel/entry.S      | 26 +++++++++++++++++++++++++-
 2 files changed, 41 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index ca11ff7..c84a56d 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -336,6 +336,22 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
 		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
 }
 
+void __init arm64_restore_ssbs_state(struct alt_instr *alt,
+				     __le32 *origptr, __le32 *updptr,
+				     int nr_inst)
+{
+	BUG_ON(nr_inst != 1);
+	/*
+	 * Only restore EL0 SSBS state on EL1 entry if cpu does not
+	 * support the capability and capability is present for at
+	 * least one cpu and if the SSBD state allows it to
+	 * be changed.
+	 */
+	if (!this_cpu_has_cap(ARM64_SSBS) && cpus_have_cap(ARM64_SSBS) &&
+	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
+}
+
 void arm64_set_ssbd_mitigation(bool state)
 {
 	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 9cdc459..7e79305 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -143,6 +143,25 @@ alternative_cb_end
 #endif
 	.endm
 
+	// This macro updates spsr. It also corrupts the condition
+	// codes state.
+	.macro	restore_ssbs_state, saved_spsr, tmp
+#ifdef CONFIG_ARM64_SSBD
+alternative_cb	arm64_restore_ssbs_state
+	b	.L__asm_ssbs_skip\@
+alternative_cb_end
+	ldr	\tmp, [tsk, #TSK_TI_FLAGS]
+	tbnz	\tmp, #TIF_SSBD, .L__asm_ssbs_skip\@
+	tst	\saved_spsr, #PSR_MODE32_BIT	// native task?
+	b.ne	.L__asm_ssbs_compat\@
+	orr	\saved_spsr, \saved_spsr, #PSR_SSBS_BIT
+	b	.L__asm_ssbs_skip\@
+.L__asm_ssbs_compat\@:
+	orr	\saved_spsr, \saved_spsr, #PSR_AA32_SSBS_BIT
+.L__asm_ssbs_skip\@:
+#endif
+	.endm
+
 	.macro	kernel_entry, el, regsize = 64
 	.if	\regsize == 32
 	mov	w0, w0				// zero upper 32 bits of x0
@@ -182,8 +201,13 @@ alternative_cb_end
 	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
 	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
 	.endif /* \el == 0 */
-	mrs	x22, elr_el1
 	mrs	x23, spsr_el1
+
+	.if	\el == 0
+	restore_ssbs_state x23, x22
+	.endif
+
+	mrs	x22, elr_el1
 	stp	lr, x21, [sp, #S_LR]
 
 	/*
-- 
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a
member of the Code Aurora Forum, hosted by The Linux Foundation
