Message-Id: <20240411-tso-v1-3-754f11abfbff@marcan.st>
Date: Thu, 11 Apr 2024 09:51:22 +0900
From: Hector Martin <marcan@...can.st>
To: Catalin Marinas <catalin.marinas@....com>, 
 Will Deacon <will@...nel.org>, Marc Zyngier <maz@...nel.org>, 
 Mark Rutland <mark.rutland@....com>
Cc: Zayd Qumsieh <zayd_qumsieh@...le.com>, Justin Lu <ih_justin@...le.com>, 
 Ryan Houdek <Houdek.Ryan@...-emu.org>, Mark Brown <broonie@...nel.org>, 
 Ard Biesheuvel <ardb@...nel.org>, Mateusz Guzik <mjguzik@...il.com>, 
 Anshuman Khandual <anshuman.khandual@....com>, 
 Oliver Upton <oliver.upton@...ux.dev>, Miguel Luis <miguel.luis@...cle.com>, 
 Joey Gouly <joey.gouly@....com>, Christoph Paasch <cpaasch@...le.com>, 
 Kees Cook <keescook@...omium.org>, Sami Tolvanen <samitolvanen@...gle.com>, 
 Baoquan He <bhe@...hat.com>, Joel Granados <j.granados@...sung.com>, 
 Dawei Li <dawei.li@...ngroup.cn>, Andrew Morton <akpm@...ux-foundation.org>, 
 Florent Revest <revest@...omium.org>, David Hildenbrand <david@...hat.com>, 
 Stefan Roesch <shr@...kernel.io>, Andy Chiu <andy.chiu@...ive.com>, 
 Josh Triplett <josh@...htriplett.org>, Oleg Nesterov <oleg@...hat.com>, 
 Helge Deller <deller@....de>, Zev Weiss <zev@...ilderbeest.net>, 
 Ondrej Mosnacek <omosnace@...hat.com>, Miguel Ojeda <ojeda@...nel.org>, 
 linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org, 
 Asahi Linux <asahi@...ts.linux.dev>, Hector Martin <marcan@...can.st>
Subject: [PATCH 3/4] arm64: Introduce scaffolding to add ACTLR_EL1 to
 thread state

Some CPUs expose IMPDEF features in ACTLR_EL1 that can be meaningfully
controlled per-thread (like TSO control on Apple cores). Add the basic
scaffolding to save/restore this register as part of context switching.

This mechanism is disabled by default, both by a config symbol and by a
runtime check, so it is never triggered unless the system is known to
need it for some feature (which also implies that the layout of
ACTLR_EL1 is uniform across all CPU core types).
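
For illustration only (this is not part of the diff below): the helper
added here always returns false, so the new save/restore path stays
dead code until a later change wires it up. A follow-up could, for
example, key it off a system-wide CPU capability; the capability name
in this sketch is hypothetical and not defined by this patch:

  static __always_inline bool system_has_actlr_state(void)
  {
          return IS_ENABLED(CONFIG_ARM64_ACTLR_STATE) &&
                 alternative_has_cap_unlikely(ARM64_HAS_ACTLR_STATE);
  }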

Signed-off-by: Hector Martin <marcan@...can.st>
---
 arch/arm64/Kconfig                  |  3 +++
 arch/arm64/include/asm/cpufeature.h |  5 +++++
 arch/arm64/include/asm/processor.h  |  3 +++
 arch/arm64/kernel/process.c         | 25 +++++++++++++++++++++++++
 arch/arm64/kernel/setup.c           |  8 ++++++++
 5 files changed, 44 insertions(+)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index f8e66fe44ff4..9b3593b34cce 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -408,6 +408,9 @@ config KASAN_SHADOW_OFFSET
 config UNWIND_TABLES
 	bool
 
+config ARM64_ACTLR_STATE
+	bool
+
 source "arch/arm64/Kconfig.platforms"
 
 menu "Kernel Features"
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index fb215b0e7529..46ab37f8f4d8 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -909,6 +909,11 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
 	return 8;
 }
 
+static __always_inline bool system_has_actlr_state(void)
+{
+	return false;
+}
+
 s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
 struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);
 
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index f77371232d8c..d43c5791a35e 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -184,6 +184,9 @@ struct thread_struct {
 	u64			sctlr_user;
 	u64			svcr;
 	u64			tpidr2_el0;
+#ifdef CONFIG_ARM64_ACTLR_STATE
+	u64			actlr;
+#endif
 };
 
 static inline unsigned int thread_get_vl(struct thread_struct *thread,
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 7920056bad3e..117f80e16aac 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -372,6 +372,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 		if (system_supports_tpidr2())
 			p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
 
+#ifdef CONFIG_ARM64_ACTLR_STATE
+		if (system_has_actlr_state())
+			p->thread.actlr = read_sysreg(actlr_el1);
+#endif
+
 		if (stack_start) {
 			if (is_compat_thread(task_thread_info(p)))
 				childregs->compat_sp = stack_start;
@@ -533,6 +538,25 @@ int arch_prctl_mem_model_set(struct task_struct *t, unsigned long val)
 }
 #endif
 
+#ifdef CONFIG_ARM64_ACTLR_STATE
+/*
+ * IMPDEF control register ACTLR_EL1 handling. Some CPUs use this to
+ * expose features that can be controlled by userspace.
+ */
+static void actlr_thread_switch(struct task_struct *next)
+{
+	if (!system_has_actlr_state())
+		return;
+
+	current->thread.actlr = read_sysreg(actlr_el1);
+	write_sysreg(next->thread.actlr, actlr_el1);
+}
+#else
+static inline void actlr_thread_switch(struct task_struct *next)
+{
+}
+#endif
+
 /*
  * Thread switching.
  */
@@ -550,6 +574,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	ssbs_thread_switch(next);
 	erratum_1418040_thread_switch(next);
 	ptrauth_thread_switch_user(next);
+	actlr_thread_switch(next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 65a052bf741f..35342f633a85 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -359,6 +359,14 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
 	 */
 	init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
 #endif
+#ifdef CONFIG_ARM64_ACTLR_STATE
+	/* Store the boot CPU ACTLR_EL1 value as the default. This will only
+	 * be actually restored during context switching iff the platform is
+	 * known to use ACTLR_EL1 for exposable features and its layout is
+	 * known to be the same on all CPUs.
+	 */
+	init_task.thread.actlr = read_sysreg(actlr_el1);
+#endif
 
 	if (boot_args[1] || boot_args[2] || boot_args[3]) {
 		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"

-- 
2.44.0
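
For context, a minimal sketch of how the per-thread ACTLR_EL1 state
added above is ultimately exercised from userspace, assuming the
PR_SET_MEM_MODEL prctl interface from earlier patches in this series
(the prctl name and the PR_SET_MEM_MODEL_TSO argument are assumptions
here, not something defined by this patch):

  /* Build against uapi headers from a kernel carrying this series. */
  #include <linux/prctl.h>
  #include <sys/prctl.h>
  #include <stdio.h>

  int main(void)
  {
          /* Request the stricter TSO memory model for this thread. */
          if (prctl(PR_SET_MEM_MODEL, PR_SET_MEM_MODEL_TSO, 0, 0, 0)) {
                  perror("PR_SET_MEM_MODEL");
                  return 1;
          }
          /* The IMPDEF TSO control in ACTLR_EL1 is now enabled. */
          return 0;
  }

On such a system, the actlr_thread_switch() hook added in process.c is
what keeps that per-thread setting in place across context switches.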

