Message-ID: <20251219181147.3404071-12-ben.horgan@arm.com>
Date: Fri, 19 Dec 2025 18:11:13 +0000
From: Ben Horgan <ben.horgan@....com>
To: ben.horgan@....com
Cc: amitsinght@...vell.com,
baisheng.gao@...soc.com,
baolin.wang@...ux.alibaba.com,
carl@...amperecomputing.com,
dave.martin@....com,
david@...nel.org,
dfustini@...libre.com,
fenghuay@...dia.com,
gshan@...hat.com,
james.morse@....com,
jonathan.cameron@...wei.com,
kobak@...dia.com,
lcherian@...vell.com,
linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org,
peternewman@...gle.com,
punit.agrawal@....qualcomm.com,
quic_jiles@...cinc.com,
reinette.chatre@...el.com,
rohit.mathew@....com,
scott@...amperecomputing.com,
sdonthineni@...dia.com,
tan.shaopeng@...itsu.com,
xhao@...ux.alibaba.com,
catalin.marinas@....com,
will@...nel.org,
corbet@....net,
maz@...nel.org,
oupton@...nel.org,
joey.gouly@....com,
suzuki.poulose@....com,
kvmarm@...ts.linux.dev
Subject: [PATCH v2 11/45] arm64: mpam: Initialise and context switch the MPAMSM_EL1 register

The MPAMSM_EL1 register sets the MPAM labels, PARTID and PMG, for loads
and stores generated by a shared SMCU. Disable the traps so the kernel
can use the register, and program it with the same configuration as the
per-cpu MPAM configuration already written to MPAM0_EL1 and MPAM1_EL1.

If an SMCU is not shared with other cpus then it is implementation
defined whether the configuration from MPAMSM_EL1 or the one from the
appropriate MPAMy_ELx is used. As we program the same configuration
into MPAM0_EL1, MPAM1_EL1 and MPAMSM_EL1, the result is the same either
way.
Signed-off-by: Ben Horgan <ben.horgan@....com>
---
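A note that may help review: assuming the architectural layout, PARTID_D
occupies bits [15:0] and PMG_D bits [39:32] in both MPAM1_EL1 and
MPAMSM_EL1, so masking the MPAM1_EL1-format regval with the MPAMSM_EL1
data-field masks is enough to build the streaming-mode value. Below is a
standalone sketch of that relationship; it is illustrative only, and the
mask constants and helper are local to the example rather than the
kernel's generated sysreg names.

/*
 * Standalone illustration (not kernel code) of the MPAMSM_EL1 value
 * derived from an MPAM1_EL1-format regval. Assumed field offsets:
 * PARTID_D bits [15:0], PARTID_I bits [31:16], PMG_D bits [39:32],
 * PMG_I bits [47:40]; MPAMSM_EL1 only has the _D fields, at the same
 * positions.
 */
#include <stdint.h>
#include <stdio.h>

#define PARTID_D_MASK	0xffffULL		/* bits [15:0]  */
#define PMG_D_MASK	(0xffULL << 32)		/* bits [39:32] */

/* Illustrative helper: MPAM1_EL1-style value with D and I labels equal */
static uint64_t mpam1_regval(uint16_t partid, uint8_t pmg)
{
	return ((uint64_t)pmg << 40) | ((uint64_t)pmg << 32) |
	       ((uint64_t)partid << 16) | partid;
}

int main(void)
{
	uint64_t regval = mpam1_regval(7, 1);
	/* What the patch writes to MPAMSM_EL1: only the data labels survive */
	uint64_t mpamsm = regval & (PARTID_D_MASK | PMG_D_MASK);

	printf("MPAM1_EL1  = %#018llx\n", (unsigned long long)regval);
	printf("MPAMSM_EL1 = %#018llx\n", (unsigned long long)mpamsm);
	return 0;
}

With PARTID 7 and PMG 1 this prints 0x0000010100070007 and
0x0000000100000007, i.e. only the data labels carry over.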
 arch/arm64/include/asm/el2_setup.h | 3 ++-
 arch/arm64/include/asm/mpam.h      | 2 ++
 arch/arm64/kernel/cpufeature.c     | 2 ++
 arch/arm64/kernel/mpam.c           | 3 +++
 4 files changed, 9 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index cacd20df1786..d37984c09799 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -504,7 +504,8 @@
 	check_override id_aa64pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT, .Linit_mpam_\@, .Lskip_mpam_\@, x1, x2
 .Linit_mpam_\@:
-	msr_s	SYS_MPAM2_EL2, xzr		// use the default partition
+	mov	x0, #MPAM2_EL2_EnMPAMSM_MASK
+	msr_s	SYS_MPAM2_EL2, x0		// use the default partition,
 						// and disable lower traps
 	mrs_s	x0, SYS_MPAMIDR_EL1
 	tbz	x0, #MPAMIDR_EL1_HAS_HCR_SHIFT, .Lskip_mpam_\@	// skip if no MPAMHCR reg
diff --git a/arch/arm64/include/asm/mpam.h b/arch/arm64/include/asm/mpam.h
index 2ab3dca6977c..c4cb015e8d8f 100644
--- a/arch/arm64/include/asm/mpam.h
+++ b/arch/arm64/include/asm/mpam.h
@@ -63,6 +63,8 @@ static inline void mpam_thread_switch(struct task_struct *tsk)
 		return;
 	write_sysreg_s(regval, SYS_MPAM1_EL1);
+	if (system_supports_sme())
+		write_sysreg_s(regval & (MPAMSM_EL1_PARTID_D | MPAMSM_EL1_PMG_D), SYS_MPAMSM_EL1);
 	isb();
 	/* Synchronising the EL0 write is left until the ERET to EL0 */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 0cdfb3728f43..2ede543b3eeb 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2491,6 +2491,8 @@ cpu_enable_mpam(const struct arm64_cpu_capabilities *entry)
 	regval = READ_ONCE(per_cpu(arm64_mpam_current, cpu));
 	write_sysreg_s(regval, SYS_MPAM1_EL1);
+	if (system_supports_sme())
+		write_sysreg_s(regval & (MPAMSM_EL1_PARTID_D | MPAMSM_EL1_PMG_D), SYS_MPAMSM_EL1);
 	isb();
 	/* Synchronising the EL0 write is left until the ERET to EL0 */
diff --git a/arch/arm64/kernel/mpam.c b/arch/arm64/kernel/mpam.c
index dbe0a2d05abb..6ce4a36469ce 100644
--- a/arch/arm64/kernel/mpam.c
+++ b/arch/arm64/kernel/mpam.c
@@ -28,6 +28,9 @@ static int mpam_pm_notifier(struct notifier_block *self,
 	 */
 	regval = READ_ONCE(per_cpu(arm64_mpam_current, cpu));
 	write_sysreg_s(regval, SYS_MPAM1_EL1);
+	if (system_supports_sme())
+		write_sysreg_s(regval & (MPAMSM_EL1_PARTID_D | MPAMSM_EL1_PMG_D),
+			       SYS_MPAMSM_EL1);
 	isb();
 	write_sysreg_s(regval, SYS_MPAM0_EL1);
--
2.43.0