[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20231222-kvm-arm64-sme-v2-14-da226cb180bb@kernel.org>
Date: Fri, 22 Dec 2023 16:21:22 +0000
From: Mark Brown <broonie@...nel.org>
To: Marc Zyngier <maz@...nel.org>, Oliver Upton <oliver.upton@...ux.dev>,
James Morse <james.morse@....com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Catalin Marinas <catalin.marinas@....com>, Will Deacon <will@...nel.org>,
Paolo Bonzini <pbonzini@...hat.com>, Jonathan Corbet <corbet@....net>,
Shuah Khan <shuah@...nel.org>
Cc: linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev,
linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
linux-doc@...r.kernel.org, linux-kselftest@...r.kernel.org,
Mark Brown <broonie@...nel.org>
Subject: [PATCH RFC v2 14/22] KVM: arm64: Manage and handle SME traps
Now that we have support for managing SME state for KVM guests, add
handling for SME exceptions generated by guests. As with SVE, these are
routed to the generic floating point exception handlers for both VHE and
nVHE; the floating point state is handled as a uniform block.
Since we do not presently support SME for protected VMs, handle
exceptions from protected guests as UNDEF.
For nVHE and hVHE modes we currently do a lazy restore of the host EL2
setup for SVE; do the same for SME. Since it is likely that there will be
common situations where SVE and SME are both used in quick succession by
the host (e.g., saving the guest state), restore the configuration for both
at once in order to minimise the number of EL2 entries.
Signed-off-by: Mark Brown <broonie@...nel.org>
---
arch/arm64/include/asm/kvm_emulate.h | 12 ++++----
arch/arm64/kvm/handle_exit.c | 11 +++++++
arch/arm64/kvm/hyp/nvhe/hyp-main.c | 56 ++++++++++++++++++++++++++++++------
arch/arm64/kvm/hyp/nvhe/switch.c | 13 ++++-----
arch/arm64/kvm/hyp/vhe/switch.c | 3 ++
5 files changed, 73 insertions(+), 22 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 14d6ff2e2a39..756c2c28c592 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -584,16 +584,15 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
if (has_vhe()) {
val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
- CPACR_EL1_ZEN_EL1EN);
- if (cpus_have_final_cap(ARM64_SME))
- val |= CPACR_EL1_SMEN_EL1EN;
+ CPACR_EL1_ZEN_EL1EN | CPACR_EL1_SMEN_EL1EN);
} else if (has_hvhe()) {
val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
if (!vcpu_has_sve(vcpu) ||
(vcpu->arch.fp_state != FP_STATE_GUEST_OWNED))
val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
- if (cpus_have_final_cap(ARM64_SME))
+ if (!vcpu_has_sme(vcpu) ||
+ (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED))
val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
} else {
val = CPTR_NVHE_EL2_RES1;
@@ -602,8 +601,9 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
if (vcpu_has_sve(vcpu) &&
(vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
val |= CPTR_EL2_TZ;
- if (cpus_have_final_cap(ARM64_SME))
- val &= ~CPTR_EL2_TSM;
+ if (vcpu_has_sme(vcpu) &&
+ (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
+ val |= CPTR_EL2_TSM;
}
return val;
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 617ae6dea5d5..e5d8d8767872 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -206,6 +206,16 @@ static int handle_sve(struct kvm_vcpu *vcpu)
return 1;
}
+/*
+ * Guest access to SME registers should be routed to this handler only
+ * when the system doesn't support SME.
+ */
+static int handle_sme(struct kvm_vcpu *vcpu)
+{
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
/*
* Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
* a NOP). If we get here, it is that we didn't fixup ptrauth on exit, and all
@@ -268,6 +278,7 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_SVC64] = handle_svc,
[ESR_ELx_EC_SYS64] = kvm_handle_sys_reg,
[ESR_ELx_EC_SVE] = handle_sve,
+ [ESR_ELx_EC_SME] = handle_sme,
[ESR_ELx_EC_ERET] = kvm_handle_eret,
[ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort,
[ESR_ELx_EC_DABT_LOW] = kvm_handle_guest_abort,
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 56808df6a078..b2da4800b673 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -411,6 +411,52 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
kvm_skip_host_instr();
}
+static void handle_host_vec(void)
+{
+ u64 old_smcr, new_smcr;
+ u64 mask = 0;
+
+ /*
+ * Handle lazy restore of the EL2 configuration for host SVE
+ * and SME usage. It is likely that when a host supports both
+ * SVE and SME it will use both in quick succession (eg,
+ * saving guest state) so we restore both when either traps.
+ */
+ if (has_hvhe()) {
+ if (cpus_have_final_cap(ARM64_SVE))
+ mask |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
+ if (cpus_have_final_cap(ARM64_SME))
+ mask |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
+
+ sysreg_clear_set(cpacr_el1, 0, mask);
+ } else {
+ if (cpus_have_final_cap(ARM64_SVE))
+ mask |= CPTR_EL2_TZ;
+ if (cpus_have_final_cap(ARM64_SME))
+ mask |= CPTR_EL2_TSM;
+
+ sysreg_clear_set(cptr_el2, mask, 0);
+ }
+
+ isb();
+
+ if (cpus_have_final_cap(ARM64_SVE))
+ sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+
+ if (cpus_have_final_cap(ARM64_SME)) {
+ old_smcr = read_sysreg_s(SYS_SMCR_EL2);
+ new_smcr = SMCR_ELx_LEN_MASK;
+
+ if (cpus_have_final_cap(ARM64_SME_FA64))
+ new_smcr |= SMCR_ELx_FA64_MASK;
+ if (cpus_have_final_cap(ARM64_SME2))
+ new_smcr |= SMCR_ELx_EZT0_MASK;
+
+ if (old_smcr != new_smcr)
+ write_sysreg_s(new_smcr, SYS_SMCR_EL2);
+ }
+}
+
void handle_trap(struct kvm_cpu_context *host_ctxt)
{
u64 esr = read_sysreg_el2(SYS_ESR);
@@ -423,14 +469,8 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
handle_host_smc(host_ctxt);
break;
case ESR_ELx_EC_SVE:
- /* Handle lazy restore of the host VL */
- if (has_hvhe())
- sysreg_clear_set(cpacr_el1, 0, (CPACR_EL1_ZEN_EL1EN |
- CPACR_EL1_ZEN_EL0EN));
- else
- sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
- isb();
- sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+ case ESR_ELx_EC_SME:
+ handle_host_vec();
break;
case ESR_ELx_EC_IABT_LOW:
case ESR_ELx_EC_DABT_LOW:
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index c50f8459e4fc..b022728edb2f 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -46,19 +46,14 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
val = vcpu->arch.cptr_el2;
val |= CPTR_EL2_TAM; /* Same bit irrespective of E2H */
val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
- if (cpus_have_final_cap(ARM64_SME)) {
- if (has_hvhe())
- val &= ~(CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN);
- else
- val |= CPTR_EL2_TSM;
- }
if (!guest_owns_fp_regs(vcpu)) {
if (has_hvhe())
val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
- CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
+ CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
+ CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN);
else
- val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
+ val |= CPTR_EL2_TFP | CPTR_EL2_TZ | CPTR_EL2_TSM;
__activate_traps_fpsimd32(vcpu);
}
@@ -186,6 +181,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
[ESR_ELx_EC_SYS64] = kvm_hyp_handle_sysreg,
+ [ESR_ELx_EC_SME] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_SVE] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
@@ -198,6 +194,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
static const exit_handler_fn pvm_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_SYS64] = kvm_handle_pvm_sys64,
+ [ESR_ELx_EC_SME] = kvm_handle_pvm_restricted,
[ESR_ELx_EC_SVE] = kvm_handle_pvm_restricted,
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 1581df6aec87..0b1a9733f3e0 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -78,6 +78,8 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
if (guest_owns_fp_regs(vcpu)) {
if (vcpu_has_sve(vcpu))
val |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
+ if (vcpu_has_sme(vcpu))
+ val |= CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN;
} else {
val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
__activate_traps_fpsimd32(vcpu);
@@ -177,6 +179,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
[ESR_ELx_EC_SYS64] = kvm_hyp_handle_sysreg,
+ [ESR_ELx_EC_SME] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_SVE] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
--
2.30.2
Powered by blists - more mailing lists