Message-ID: <20260120080013.2153519-14-anup.patel@oss.qualcomm.com>
Date: Tue, 20 Jan 2026 13:29:59 +0530
From: Anup Patel <anup.patel@....qualcomm.com>
To: Paolo Bonzini <pbonzini@...hat.com>, Atish Patra <atish.patra@...ux.dev>
Cc: Palmer Dabbelt <palmer@...belt.com>, Paul Walmsley <pjw@...nel.org>,
Alexandre Ghiti <alex@...ti.fr>, Shuah Khan <shuah@...nel.org>,
Anup Patel <anup@...infault.org>,
Andrew Jones <andrew.jones@....qualcomm.com>,
kvm-riscv@...ts.infradead.org, kvm@...r.kernel.org,
linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-kselftest@...r.kernel.org,
Anup Patel <anup.patel@....qualcomm.com>
Subject: [PATCH 13/27] RISC-V: KVM: Extend kvm_riscv_vcpu_config_load() for nested virtualization
The kvm_riscv_vcpu_config_load() function will also be used when switching
between guest HS-mode and guest VS/VU-mode, so extend it accordingly.
Signed-off-by: Anup Patel <anup.patel@....qualcomm.com>
---
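For illustration only (not part of this patch), the intended calling pattern
on the guest HS-mode <-> VS/VU-mode switch path would look roughly like the
sketch below; the helper name is hypothetical:

/*
 * Hypothetical caller, for illustration: when the guest hypervisor
 * switches between its own HS-mode and its VS/VU-mode guest, reload
 * the H-extension CSRs for the new world.
 *
 * nested_virt == true  selects the nested CSR state (nsc->hedeleg,
 *                      hideleg/henvcfg/hstateen0 cleared, HVICTL.VTI set)
 * nested_virt == false selects the per-VCPU config (cfg->hedeleg,
 *                      cfg->hideleg, ..., HVICTL.VTI cleared)
 */
static void example_nested_world_switch(struct kvm_vcpu *vcpu,
					bool enter_nested_guest)
{
	kvm_riscv_vcpu_config_load(vcpu, enter_nested_guest);
}
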
arch/riscv/include/asm/kvm_vcpu_config.h | 2 +-
arch/riscv/kvm/vcpu.c | 2 +-
arch/riscv/kvm/vcpu_config.c | 55 ++++++++++++++++++------
3 files changed, 44 insertions(+), 15 deletions(-)
diff --git a/arch/riscv/include/asm/kvm_vcpu_config.h b/arch/riscv/include/asm/kvm_vcpu_config.h
index fcc15a0296b3..be7bffb6a428 100644
--- a/arch/riscv/include/asm/kvm_vcpu_config.h
+++ b/arch/riscv/include/asm/kvm_vcpu_config.h
@@ -20,6 +20,6 @@ struct kvm_vcpu_config {
void kvm_riscv_vcpu_config_init(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_config_guest_debug(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_config_ran_once(struct kvm_vcpu *vcpu);
-void kvm_riscv_vcpu_config_load(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_config_load(struct kvm_vcpu *vcpu, bool nested_virt);
#endif
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 178a4409d4e9..077637aff9a2 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -560,7 +560,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
* the read/write behaviour of certain CSRs change
* based on VCPU config CSRs.
*/
- kvm_riscv_vcpu_config_load(vcpu);
+ kvm_riscv_vcpu_config_load(vcpu, kvm_riscv_vcpu_nested_virt(vcpu));
if (kvm_riscv_nacl_sync_csr_available()) {
nsh = nacl_shmem();
diff --git a/arch/riscv/kvm/vcpu_config.c b/arch/riscv/kvm/vcpu_config.c
index eb7374402b07..6c49bd6f83c5 100644
--- a/arch/riscv/kvm/vcpu_config.c
+++ b/arch/riscv/kvm/vcpu_config.c
@@ -69,33 +69,62 @@ void kvm_riscv_vcpu_config_ran_once(struct kvm_vcpu *vcpu)
cfg->hedeleg &= ~BIT(EXC_BREAKPOINT);
}
-void kvm_riscv_vcpu_config_load(struct kvm_vcpu *vcpu)
+void kvm_riscv_vcpu_config_load(struct kvm_vcpu *vcpu, bool nested_virt)
{
+ struct kvm_vcpu_nested_csr *nsc = &vcpu->arch.nested.csr;
struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+ unsigned long hedeleg, hideleg, tmp;
+ u64 henvcfg, hstateen0;
void *nsh;
+ if (nested_virt) {
+ hedeleg = nsc->hedeleg;
+ hideleg = 0;
+ henvcfg = 0;
+ hstateen0 = 0;
+ } else {
+ hedeleg = cfg->hedeleg;
+ hideleg = cfg->hideleg;
+ henvcfg = cfg->henvcfg;
+ hstateen0 = cfg->hstateen0;
+ }
+
if (kvm_riscv_nacl_sync_csr_available()) {
nsh = nacl_shmem();
- nacl_csr_write(nsh, CSR_HEDELEG, cfg->hedeleg);
- nacl_csr_write(nsh, CSR_HIDELEG, cfg->hideleg);
- nacl_csr_write(nsh, CSR_HENVCFG, cfg->henvcfg);
+ nacl_csr_write(nsh, CSR_HEDELEG, hedeleg);
+ nacl_csr_write(nsh, CSR_HIDELEG, hideleg);
+ nacl_csr_write(nsh, CSR_HENVCFG, henvcfg);
if (IS_ENABLED(CONFIG_32BIT))
- nacl_csr_write(nsh, CSR_HENVCFGH, cfg->henvcfg >> 32);
+ nacl_csr_write(nsh, CSR_HENVCFGH, henvcfg >> 32);
if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
- nacl_csr_write(nsh, CSR_HSTATEEN0, cfg->hstateen0);
+ nacl_csr_write(nsh, CSR_HSTATEEN0, hstateen0);
if (IS_ENABLED(CONFIG_32BIT))
- nacl_csr_write(nsh, CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
+ nacl_csr_write(nsh, CSR_HSTATEEN0H, hstateen0 >> 32);
+ }
+ if (kvm_riscv_aia_available()) {
+ tmp = nacl_csr_read(nsh, CSR_HVICTL);
+ if (nested_virt)
+ tmp |= HVICTL_VTI;
+ else
+ tmp &= ~HVICTL_VTI;
+ nacl_csr_write(nsh, CSR_HVICTL, tmp);
}
} else {
- csr_write(CSR_HEDELEG, cfg->hedeleg);
- csr_write(CSR_HIDELEG, cfg->hideleg);
- csr_write(CSR_HENVCFG, cfg->henvcfg);
+ csr_write(CSR_HEDELEG, hedeleg);
+ csr_write(CSR_HIDELEG, hideleg);
+ csr_write(CSR_HENVCFG, henvcfg);
if (IS_ENABLED(CONFIG_32BIT))
- csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
+ csr_write(CSR_HENVCFGH, henvcfg >> 32);
if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
- csr_write(CSR_HSTATEEN0, cfg->hstateen0);
+ csr_write(CSR_HSTATEEN0, hstateen0);
if (IS_ENABLED(CONFIG_32BIT))
- csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
+ csr_write(CSR_HSTATEEN0H, hstateen0 >> 32);
+ }
+ if (kvm_riscv_aia_available()) {
+ if (nested_virt)
+ csr_set(CSR_HVICTL, HVICTL_VTI);
+ else
+ csr_clear(CSR_HVICTL, HVICTL_VTI);
}
}
}
--
2.43.0