Message-Id: <20240411-tso-v1-2-754f11abfbff@marcan.st>
Date: Thu, 11 Apr 2024 09:51:21 +0900
From: Hector Martin <marcan@...can.st>
To: Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>, Marc Zyngier <maz@...nel.org>,
Mark Rutland <mark.rutland@....com>
Cc: Zayd Qumsieh <zayd_qumsieh@...le.com>, Justin Lu <ih_justin@...le.com>,
Ryan Houdek <Houdek.Ryan@...-emu.org>, Mark Brown <broonie@...nel.org>,
Ard Biesheuvel <ardb@...nel.org>, Mateusz Guzik <mjguzik@...il.com>,
Anshuman Khandual <anshuman.khandual@....com>,
Oliver Upton <oliver.upton@...ux.dev>, Miguel Luis <miguel.luis@...cle.com>,
Joey Gouly <joey.gouly@....com>, Christoph Paasch <cpaasch@...le.com>,
Kees Cook <keescook@...omium.org>, Sami Tolvanen <samitolvanen@...gle.com>,
Baoquan He <bhe@...hat.com>, Joel Granados <j.granados@...sung.com>,
Dawei Li <dawei.li@...ngroup.cn>, Andrew Morton <akpm@...ux-foundation.org>,
Florent Revest <revest@...omium.org>, David Hildenbrand <david@...hat.com>,
Stefan Roesch <shr@...kernel.io>, Andy Chiu <andy.chiu@...ive.com>,
Josh Triplett <josh@...htriplett.org>, Oleg Nesterov <oleg@...hat.com>,
Helge Deller <deller@....de>, Zev Weiss <zev@...ilderbeest.net>,
Ondrej Mosnacek <omosnace@...hat.com>, Miguel Ojeda <ojeda@...nel.org>,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
Asahi Linux <asahi@...ts.linux.dev>, Hector Martin <marcan@...can.st>
Subject: [PATCH 2/4] arm64: Implement PR_{GET,SET}_MEM_MODEL for always-TSO
CPUs
Some ARM64 implementations are known to always use the TSO memory model.
Add trivial support for the PR_{GET,SET}_MEM_MODEL prctl, which allows
userspace to learn this fact.

Known TSO implementations:
- Nvidia Denver
- Nvidia Carmel
- Fujitsu A64FX
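
For reference, a minimal userspace sketch of probing the new prctl pair
(illustrative only, not part of the patch; it assumes the
PR_{GET,SET}_MEM_MODEL and PR_SET_MEM_MODEL_* uapi definitions,
presumably added earlier in this series, are visible via <linux/prctl.h>):

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Query the memory model currently reported for this task. */
	int model = prctl(PR_GET_MEM_MODEL, 0, 0, 0, 0);

	printf("current memory model: %d\n", model);

	/* Requesting TSO only succeeds on CPUs detected as ARM64_HAS_TSO_FIXED. */
	if (prctl(PR_SET_MEM_MODEL, PR_SET_MEM_MODEL_TSO, 0, 0, 0) == 0)
		printf("TSO memory model is in effect\n");
	else
		perror("PR_SET_MEM_MODEL(TSO)");

	return 0;
}
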
Signed-off-by: Hector Martin <marcan@...can.st>
---
arch/arm64/Kconfig | 9 +++++++++
arch/arm64/include/asm/cpufeature.h | 4 ++++
arch/arm64/kernel/Makefile | 3 ++-
arch/arm64/kernel/cpufeature.c | 11 +++++-----
arch/arm64/kernel/cpufeature_impdef.c | 38 +++++++++++++++++++++++++++++++++++
arch/arm64/kernel/process.c | 24 ++++++++++++++++++++++
arch/arm64/tools/cpucaps | 1 +
7 files changed, 84 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7b11c98b3e84..f8e66fe44ff4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2162,6 +2162,15 @@ config ARM64_DEBUG_PRIORITY_MASKING
If unsure, say N
endif # ARM64_PSEUDO_NMI
+config ARM64_MEMORY_MODEL_CONTROL
+ bool "Runtime memory model control"
+ help
+ Some ARM64 CPUs support runtime switching of the CPU memory
+ model, which can be useful to emulate other CPU architectures
+ which have different memory models. Say Y to enable support
+ for the PR_SET_MEM_MODEL/PR_GET_MEM_MODEL prctl() calls on
+ CPUs with this feature.
+
config RELOCATABLE
bool "Build a relocatable kernel image" if EXPERT
select ARCH_HAS_RELR
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 8b904a757bd3..fb215b0e7529 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -1032,6 +1032,10 @@ static inline bool cpu_has_lpa2(void)
#endif
}
+void __init init_cpucap_indirect_list_impdef(void);
+void __init init_cpucap_indirect_list_from_array(const struct arm64_cpu_capabilities *caps);
+bool cpufeature_matches(u64 reg, const struct arm64_cpu_capabilities *entry);
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 763824963ed1..5eaaee7b8358 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -33,7 +33,8 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
return_address.o cpuinfo.o cpu_errata.o \
cpufeature.o alternative.o cacheinfo.o \
smp.o smp_spin_table.o topology.o smccc-call.o \
- syscall.o proton-pack.o idle.o patching.o pi/
+ syscall.o proton-pack.o idle.o patching.o pi/ \
+ cpufeature_impdef.o
obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
sys_compat.o
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 56583677c1f2..e39ab93ad683 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1028,7 +1028,7 @@ static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
extern const struct arm64_cpu_capabilities arm64_errata[];
static const struct arm64_cpu_capabilities arm64_features[];
-static void __init
+void __init
init_cpucap_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
{
for (; caps->matches; caps++) {
@@ -1540,8 +1540,8 @@ has_always(const struct arm64_cpu_capabilities *entry, int scope)
return true;
}
-static bool
-feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
+bool
+cpufeature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
{
int val, min, max;
u64 tmp;
@@ -1594,14 +1594,14 @@ has_user_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
if (!mask)
return false;
- return feature_matches(val, entry);
+ return cpufeature_matches(val, entry);
}
static bool
has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
{
u64 val = read_scoped_sysreg(entry, scope);
- return feature_matches(val, entry);
+ return cpufeature_matches(val, entry);
}
const struct cpumask *system_32bit_el0_cpumask(void)
@@ -3486,6 +3486,7 @@ void __init setup_boot_cpu_features(void)
* handle the boot CPU.
*/
init_cpucap_indirect_list();
+ init_cpucap_indirect_list_impdef();
/*
* Detect broken pseudo-NMI. Must be called _before_ the call to
diff --git a/arch/arm64/kernel/cpufeature_impdef.c b/arch/arm64/kernel/cpufeature_impdef.c
new file mode 100644
index 000000000000..bb04a8e3d79d
--- /dev/null
+++ b/arch/arm64/kernel/cpufeature_impdef.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Contains implementation-defined CPU feature definitions.
+ */
+
+#include <asm/cpufeature.h>
+
+#ifdef CONFIG_ARM64_MEMORY_MODEL_CONTROL
+static bool has_tso_fixed(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ /* List of CPUs that always use the TSO memory model */
+ static const struct midr_range fixed_tso_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
+ MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+ MIDR_ALL_VERSIONS(MIDR_FUJITSU_A64FX),
+ { /* sentinel */ }
+ };
+
+ return is_midr_in_range_list(read_cpuid_id(), fixed_tso_list);
+}
+#endif
+
+static const struct arm64_cpu_capabilities arm64_impdef_features[] = {
+#ifdef CONFIG_ARM64_MEMORY_MODEL_CONTROL
+ {
+ .desc = "TSO memory model (Fixed)",
+ .capability = ARM64_HAS_TSO_FIXED,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_tso_fixed,
+ },
+#endif
+ {},
+};
+
+void __init init_cpucap_indirect_list_impdef(void)
+{
+ init_cpucap_indirect_list_from_array(arm64_impdef_features);
+}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 4ae31b7af6c3..7920056bad3e 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -41,6 +41,7 @@
#include <linux/thread_info.h>
#include <linux/prctl.h>
#include <linux/stacktrace.h>
+#include <linux/memory_ordering_model.h>
#include <asm/alternative.h>
#include <asm/compat.h>
@@ -513,6 +514,25 @@ void update_sctlr_el1(u64 sctlr)
isb();
}
+#ifdef CONFIG_ARM64_MEMORY_MODEL_CONTROL
+int arch_prctl_mem_model_get(struct task_struct *t)
+{
+ return PR_SET_MEM_MODEL_DEFAULT;
+}
+
+int arch_prctl_mem_model_set(struct task_struct *t, unsigned long val)
+{
+ if (alternative_has_cap_unlikely(ARM64_HAS_TSO_FIXED) &&
+ val == PR_SET_MEM_MODEL_TSO)
+ return 0;
+
+ if (val == PR_SET_MEM_MODEL_DEFAULT)
+ return 0;
+
+ return -EINVAL;
+}
+#endif
+
/*
* Thread switching.
*/
@@ -651,6 +671,10 @@ void arch_setup_new_exec(void)
arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
PR_SPEC_ENABLE);
}
+
+#ifdef CONFIG_ARM64_MEMORY_MODEL_CONTROL
+ arch_prctl_mem_model_set(current, PR_SET_MEM_MODEL_DEFAULT);
+#endif
}
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 62b2838a231a..daa6b9495402 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -52,6 +52,7 @@ HAS_STAGE2_FWB
HAS_TCR2
HAS_TIDCP1
HAS_TLB_RANGE
+HAS_TSO_FIXED
HAS_VA52
HAS_VIRT_HOST_EXTN
HAS_WFXT
--
2.44.0