Message-ID: <20260120080013.2153519-12-anup.patel@oss.qualcomm.com>
Date: Tue, 20 Jan 2026 13:29:57 +0530
From: Anup Patel <anup.patel@....qualcomm.com>
To: Paolo Bonzini <pbonzini@...hat.com>, Atish Patra <atish.patra@...ux.dev>
Cc: Palmer Dabbelt <palmer@...belt.com>, Paul Walmsley <pjw@...nel.org>,
Alexandre Ghiti <alex@...ti.fr>, Shuah Khan <shuah@...nel.org>,
Anup Patel <anup@...infault.org>,
Andrew Jones <andrew.jones@....qualcomm.com>,
kvm-riscv@...ts.infradead.org, kvm@...r.kernel.org,
linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-kselftest@...r.kernel.org,
Anup Patel <anup.patel@....qualcomm.com>
Subject: [PATCH 11/27] RISC-V: KVM: Use half VMID space for nested guest
A single guest with nested virtualization needs two VMIDs: one for
the guest hypervisor (L1) and another for the nested guest (L2).
To support this, divide the VMID space into two equal halves when
nested virtualization is enabled: allocate VMIDs from the lower
half and derive the nested VMID by setting the topmost VMID bit.

Since VMID detection now depends on whether nested virtualization
is available, call kvm_riscv_gstage_vmid_detect() after
kvm_riscv_nested_init().
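To illustrate the split (a standalone sketch, not part of the patch;
vmid_bits and the example VMID are arbitrary): VMIDs are handed out
from the lower half [0, 2^(vmid_bits - 1)) and the matching nested
VMID lives in the upper half, so L1 and L2 never share a VMID.

#include <stdio.h>

#define BIT(n) (1UL << (n))

static unsigned long vmid_bits = 14; /* example HGATP VMIDLEN */

/* Mirrors kvm_riscv_gstage_nested_vmid() with nesting available */
static unsigned long nested_vmid(unsigned long vmid)
{
	return vmid | BIT(vmid_bits - 1); /* upper half of VMID space */
}

int main(void)
{
	unsigned long vmid = 5; /* VMID allocated to the L1 guest */

	printf("L1 VMID: %lu, L2 VMID: %lu\n", vmid, nested_vmid(vmid));
	return 0;
}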
Signed-off-by: Anup Patel <anup.patel@....qualcomm.com>
---
arch/riscv/include/asm/kvm_vmid.h | 1 +
arch/riscv/kvm/main.c | 4 ++--
arch/riscv/kvm/tlb.c | 11 +++++++++--
arch/riscv/kvm/vmid.c | 33 ++++++++++++++++++++++++++++---
4 files changed, 42 insertions(+), 7 deletions(-)
diff --git a/arch/riscv/include/asm/kvm_vmid.h b/arch/riscv/include/asm/kvm_vmid.h
index db61b0525a8d..3048e12a639c 100644
--- a/arch/riscv/include/asm/kvm_vmid.h
+++ b/arch/riscv/include/asm/kvm_vmid.h
@@ -19,6 +19,7 @@ struct kvm_vmid {
void __init kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
+unsigned long kvm_riscv_gstage_nested_vmid(unsigned long vmid);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
index 5b4bf972d242..28044eefda47 100644
--- a/arch/riscv/kvm/main.c
+++ b/arch/riscv/kvm/main.c
@@ -123,8 +123,6 @@ static int __init riscv_kvm_init(void)
return -ENODEV;
}
- kvm_riscv_gstage_vmid_detect();
-
rc = kvm_riscv_aia_init();
if (rc && rc != -ENODEV) {
kvm_riscv_nacl_exit();
@@ -133,6 +131,8 @@ static int __init riscv_kvm_init(void)
kvm_riscv_nested_init();
+ kvm_riscv_gstage_vmid_detect();
+
kvm_info("hypervisor extension available\n");
if (kvm_riscv_nacl_available()) {
diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c
index ff1aeac4eb8e..a95aa5336560 100644
--- a/arch/riscv/kvm/tlb.c
+++ b/arch/riscv/kvm/tlb.c
@@ -160,7 +160,7 @@ void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
{
- unsigned long vmid;
+ unsigned long vmid, nvmid;
if (!kvm_riscv_gstage_vmid_bits() ||
vcpu->arch.last_exit_cpu == vcpu->cpu)
@@ -180,12 +180,19 @@ void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
kvm_riscv_local_hfence_gvma_vmid_all(vmid);
+ nvmid = kvm_riscv_gstage_nested_vmid(vmid);
+ if (vmid != nvmid)
+ kvm_riscv_local_hfence_gvma_vmid_all(nvmid);
+
/*
* Flush VS-stage TLB entries for implementations where the
* VS-stage TLB does not cache guest physical address and VMID.
*/
- if (static_branch_unlikely(&kvm_riscv_vsstage_tlb_no_gpa))
+ if (static_branch_unlikely(&kvm_riscv_vsstage_tlb_no_gpa)) {
kvm_riscv_local_hfence_vvma_all(vmid);
+ if (vmid != nvmid)
+ kvm_riscv_local_hfence_vvma_all(nvmid);
+ }
}
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
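A note on the sanitize path above (illustrative sketch, not part of
the patch): once the VMID space is halved, every per-VMID flush must
cover both halves, because L2 runs under the derived VMID. The
pairing amounts to a small helper, with flush() standing in for the
hfence call:

/* Hypothetical helper showing the flush pairing used above */
static void flush_both_vmids(unsigned long vmid, unsigned long nvmid,
			     void (*flush)(unsigned long))
{
	flush(vmid);          /* L1 (guest hypervisor) entries */
	if (vmid != nvmid)    /* halved VMID space in use */
		flush(nvmid); /* L2 (nested guest) entries */
}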
diff --git a/arch/riscv/kvm/vmid.c b/arch/riscv/kvm/vmid.c
index cf34d448289d..2ddd95fe2d9c 100644
--- a/arch/riscv/kvm/vmid.c
+++ b/arch/riscv/kvm/vmid.c
@@ -25,6 +25,8 @@ static DEFINE_SPINLOCK(vmid_lock);
void __init kvm_riscv_gstage_vmid_detect(void)
{
+ unsigned long min_vmids;
+
/* Figure-out number of VMID bits in HW */
csr_write(CSR_HGATP, (kvm_riscv_gstage_mode << HGATP_MODE_SHIFT) | HGATP_VMID);
vmid_bits = csr_read(CSR_HGATP);
@@ -35,8 +37,23 @@ void __init kvm_riscv_gstage_vmid_detect(void)
/* We polluted local TLB so flush all guest TLB */
kvm_riscv_local_hfence_gvma_all();
- /* We don't use VMID bits if they are not sufficient */
- if ((1UL << vmid_bits) < num_possible_cpus())
+ /*
+ * A single guest with nested virtualization needs two
+ * VMIDs: one for the guest hypervisor (L1) and another
+ * for the nested guest (L2).
+ *
+ * Potentially, a separate guest can be running on each
+ * host CPU, so the number of VMIDs must not be:
+ *
+ * 1. less than the number of host CPUs when nested
+ *    virtualization is disabled
+ * 2. less than twice the number of host CPUs when nested
+ *    virtualization is enabled
+ */
+ min_vmids = num_possible_cpus();
+ if (kvm_riscv_nested_available())
+ min_vmids = min_vmids * 2;
+ if (BIT(vmid_bits) < min_vmids)
vmid_bits = 0;
}
@@ -45,6 +62,13 @@ unsigned long kvm_riscv_gstage_vmid_bits(void)
return vmid_bits;
}
+unsigned long kvm_riscv_gstage_nested_vmid(unsigned long vmid)
+{
+ if (kvm_riscv_nested_available())
+ return vmid | BIT(vmid_bits - 1);
+ return vmid;
+}
+
int kvm_riscv_gstage_vmid_init(struct kvm *kvm)
{
/* Mark the initial VMID and VMID version invalid */
@@ -112,7 +136,10 @@ void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu)
vmid->vmid = vmid_next;
vmid_next++;
- vmid_next &= (1 << vmid_bits) - 1;
+ if (kvm_riscv_nested_available())
+ vmid_next &= BIT(vmid_bits - 1) - 1;
+ else
+ vmid_next &= BIT(vmid_bits) - 1;
WRITE_ONCE(vmid->vmid_version, READ_ONCE(vmid_version));
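A quick worked example of the detection rule (illustrative only; the
CPU count is passed in directly instead of using num_possible_cpus()):
a host with 8 possible CPUs and nested virtualization enabled needs at
least 16 VMIDs, i.e. vmid_bits >= 4, otherwise VMID use is disabled by
forcing vmid_bits to 0.

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Sketch of the check done in kvm_riscv_gstage_vmid_detect() */
static unsigned long usable_vmid_bits(unsigned long vmid_bits,
				      unsigned long ncpus, bool nested)
{
	unsigned long min_vmids = nested ? ncpus * 2 : ncpus;

	/* Too few VMIDs to cover every possible guest: don't use them */
	return (BIT(vmid_bits) < min_vmids) ? 0 : vmid_bits;
}

int main(void)
{
	printf("%lu\n", usable_vmid_bits(3, 8, true)); /* 8 < 16   -> 0 */
	printf("%lu\n", usable_vmid_bits(4, 8, true)); /* 16 >= 16 -> 4 */
	return 0;
}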
--
2.43.0