Message-id: <20240410211652.16640-3-zayd_qumsieh@apple.com>
Date: Wed, 10 Apr 2024 14:16:40 -0700
From: Zayd Qumsieh <zayd_qumsieh@...le.com>
To: zayd_qumsieh@...le.com
Cc: Catalin Marinas <catalin.marinas@....com>, Will Deacon <will@...nel.org>,
 Mark Brown <broonie@...nel.org>, Ard Biesheuvel <ardb@...nel.org>,
 Mark Rutland <mark.rutland@....com>, Mateusz Guzik <mjguzik@...il.com>,
 Anshuman Khandual <anshuman.khandual@....com>, Marc Zyngier <maz@...nel.org>,
 Oliver Upton <oliver.upton@...ux.dev>, Miguel Luis <miguel.luis@...cle.com>,
 Joey Gouly <joey.gouly@....com>, Christoph Paasch <cpaasch@...le.com>,
 Kees Cook <keescook@...omium.org>, Sami Tolvanen <samitolvanen@...gle.com>,
 Baoquan He <bhe@...hat.com>, Lecopzer Chen <lecopzer.chen@...iatek.com>,
 Joel Granados <j.granados@...sung.com>, Dawei Li <dawei.li@...ngroup.cn>,
 Andrew Morton <akpm@...ux-foundation.org>,
 Florent Revest <revest@...omium.org>, David Hildenbrand <david@...hat.com>,
 Stefan Roesch <shr@...kernel.io>, Andy Chiu <andy.chiu@...ive.com>,
 Josh Triplett <josh@...htriplett.org>, Oleg Nesterov <oleg@...hat.com>,
 Helge Deller <deller@....de>, Zev Weiss <zev@...ilderbeest.net>,
 Ondrej Mosnacek <omosnace@...hat.com>, Miguel Ojeda <ojeda@...nel.org>,
 linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: [PATCH 2/3] tso: aarch64: context-switch tso bit on thread switch

Add support for context-switching the tso bit when switching threads.
This allows the tso bit to be set per thread, and prepares for future
work that will let userspace set the tso bit of its own thread at
will.
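
For reference, here is a rough sketch of what the tso_enabled() and
modify_tso_enable() helpers used by this patch might look like. The
real definitions (along with tso_supported()) come from an earlier
patch in this series and are not part of this diff; the ACTLR_EL1
register and the bit position below are assumptions made purely for
illustration.

#include <linux/bits.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/sysreg.h>

/*
 * Illustrative sketch only -- not taken from this series. Assumes the
 * implementation-defined TSO enable lives in ACTLR_EL1; the bit
 * position is a placeholder.
 */
#define ACTLR_EL1_TSO		BIT(1)	/* assumed bit position */

static bool tso_enabled(void)
{
	/* Report whether TSO is currently enabled on this CPU. */
	return read_sysreg(actlr_el1) & ACTLR_EL1_TSO;
}

int modify_tso_enable(bool tso_enable)
{
	u64 actlr;

	if (!tso_supported())
		return -EOPNOTSUPP;

	/* Set or clear the TSO enable bit on the current CPU. */
	actlr = read_sysreg(actlr_el1);
	if (tso_enable)
		actlr |= ACTLR_EL1_TSO;
	else
		actlr &= ~ACTLR_EL1_TSO;
	write_sysreg(actlr, actlr_el1);
	isb();

	return 0;
}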

Signed-off-by: Zayd Qumsieh <zayd_qumsieh@...le.com>
---
 arch/arm64/include/asm/processor.h | 4 ++++
 arch/arm64/include/asm/tso.h       | 1 +
 arch/arm64/kernel/process.c        | 9 +++++++++
 arch/arm64/kernel/tso.c            | 9 +++++++++
 4 files changed, 23 insertions(+)

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index f77371232d8c..a247bee24c73 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -4,6 +4,7 @@
  *
  * Copyright (C) 1995-1999 Russell King
  * Copyright (C) 2012 ARM Ltd.
+ * Copyright © 2024 Apple Inc. All rights reserved.
  */
 #ifndef __ASM_PROCESSOR_H
 #define __ASM_PROCESSOR_H
@@ -184,6 +185,9 @@ struct thread_struct {
 	u64			sctlr_user;
 	u64			svcr;
 	u64			tpidr2_el0;
+#ifdef CONFIG_ARM64_TSO
+	bool			tso;
+#endif
 };
 
 static inline unsigned int thread_get_vl(struct thread_struct *thread,
diff --git a/arch/arm64/include/asm/tso.h b/arch/arm64/include/asm/tso.h
index d9e1a7602c44..405e9a5efdf5 100644
--- a/arch/arm64/include/asm/tso.h
+++ b/arch/arm64/include/asm/tso.h
@@ -12,6 +12,7 @@
 #include <linux/types.h>
 
 int modify_tso_enable(bool tso_enable);
+void tso_thread_switch(struct task_struct *next);
 
 #endif /* CONFIG_ARM64_TSO */
 #endif /* __ASM_TSO_H */
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 4ae31b7af6c3..3831c1a97f79 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -5,6 +5,7 @@
  * Original Copyright (C) 1995  Linus Torvalds
  * Copyright (C) 1996-2000 Russell King - Converted to ARM.
  * Copyright (C) 2012 ARM Ltd.
+ * Copyright © 2024 Apple Inc. All rights reserved.
  */
 #include <linux/compat.h>
 #include <linux/efi.h>
@@ -55,6 +56,7 @@
 #include <asm/stacktrace.h>
 #include <asm/switch_to.h>
 #include <asm/system_misc.h>
+#include <asm/tso.h>
 
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 #include <linux/stackprotector.h>
@@ -530,6 +532,9 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	ssbs_thread_switch(next);
 	erratum_1418040_thread_switch(next);
 	ptrauth_thread_switch_user(next);
+#ifdef CONFIG_ARM64_TSO
+	tso_thread_switch(next);
+#endif
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
@@ -651,6 +656,10 @@ void arch_setup_new_exec(void)
 		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
 					 PR_SPEC_ENABLE);
 	}
+
+#ifdef CONFIG_ARM64_TSO
+	modify_tso_enable(false);
+#endif
 }
 
 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
diff --git a/arch/arm64/kernel/tso.c b/arch/arm64/kernel/tso.c
index b3964db7aa66..9a15d825943f 100644
--- a/arch/arm64/kernel/tso.c
+++ b/arch/arm64/kernel/tso.c
@@ -3,6 +3,7 @@
  * Copyright © 2024 Apple Inc. All rights reserved.
  */
 
+#include <linux/sched.h>
 #include <linux/types.h>
 
 #include <asm/cputype.h>
@@ -49,4 +50,12 @@ int modify_tso_enable(bool tso_enable)
 	return 0;
 }
 
+void tso_thread_switch(struct task_struct *next)
+{
+	if (tso_supported()) {
+		current->thread.tso = tso_enabled();
+		modify_tso_enable(next->thread.tso);
+	}
+}
+
 #endif /* CONFIG_ARM64_TSO */
-- 
2.39.3 (Apple Git-146)

