Message-ID: <20260120080013.2153519-13-anup.patel@oss.qualcomm.com>
Date: Tue, 20 Jan 2026 13:29:58 +0530
From: Anup Patel <anup.patel@....qualcomm.com>
To: Paolo Bonzini <pbonzini@...hat.com>, Atish Patra <atish.patra@...ux.dev>
Cc: Palmer Dabbelt <palmer@...belt.com>, Paul Walmsley <pjw@...nel.org>,
Alexandre Ghiti <alex@...ti.fr>, Shuah Khan <shuah@...nel.org>,
Anup Patel <anup@...infault.org>,
Andrew Jones <andrew.jones@....qualcomm.com>,
kvm-riscv@...ts.infradead.org, kvm@...r.kernel.org,
linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-kselftest@...r.kernel.org,
Anup Patel <anup.patel@....qualcomm.com>
Subject: [PATCH 12/27] RISC-V: KVM: Extend kvm_riscv_mmu_update_hgatp() for nested virtualization

The kvm_riscv_mmu_update_hgatp() function will also be used when switching
between guest HS-mode and guest VS/VU-mode, so extend it accordingly.

Signed-off-by: Anup Patel <anup.patel@....qualcomm.com>
---
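For reviewers, a minimal illustrative sketch (not part of the patch) of how
the hgatp value written by the new kvm_riscv_gstage_update_hgatp() helper is
packed. The SK_* constants below are stand-ins for the kernel's HGATP_*
macros, hard-coded to the standard RV64 hgatp field encodings from the
RISC-V privileged specification so that the snippet compiles stand-alone:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* RV64 hgatp layout: MODE[63:60], VMID[57:44], PPN[43:0] */
#define SK_HGATP_MODE_SHIFT	60
#define SK_HGATP_VMID_SHIFT	44
#define SK_HGATP_VMID		(0x3fffULL << SK_HGATP_VMID_SHIFT)
#define SK_HGATP_PPN		0x00000fffffffffffULL
#define SK_HGATP_MODE_SV39X4	8ULL
#define SK_PAGE_SHIFT		12

/* Mirrors the field packing done by kvm_riscv_gstage_update_hgatp() */
static uint64_t make_hgatp(uint64_t pgd_phys, uint64_t vmid)
{
	uint64_t hgatp = SK_HGATP_MODE_SV39X4 << SK_HGATP_MODE_SHIFT;

	hgatp |= (vmid << SK_HGATP_VMID_SHIFT) & SK_HGATP_VMID;
	hgatp |= (pgd_phys >> SK_PAGE_SHIFT) & SK_HGATP_PPN;
	return hgatp;
}

int main(void)
{
	/* e.g. a 16KB-aligned G-stage PGD at 0x80200000 with VMID 1 */
	printf("hgatp = 0x%016" PRIx64 "\n", make_hgatp(0x80200000ULL, 1));
	return 0;
}

With nested virtualization the only difference is which (pgd_phys, vmid)
pair gets packed: the shadow G-stage PGD plus a nested VMID when the vCPU
runs in guest VS/VU-mode, or the regular G-stage PGD plus the host VMID
otherwise.
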
arch/riscv/include/asm/kvm_gstage.h | 2 ++
arch/riscv/include/asm/kvm_mmu.h | 2 +-
arch/riscv/kvm/gstage.c | 14 ++++++++++++++
arch/riscv/kvm/mmu.c | 18 ++++++++----------
arch/riscv/kvm/vcpu.c | 4 ++--
5 files changed, 27 insertions(+), 13 deletions(-)

diff --git a/arch/riscv/include/asm/kvm_gstage.h b/arch/riscv/include/asm/kvm_gstage.h
index 595e2183173e..007a5fd7a526 100644
--- a/arch/riscv/include/asm/kvm_gstage.h
+++ b/arch/riscv/include/asm/kvm_gstage.h
@@ -67,6 +67,8 @@ void kvm_riscv_gstage_unmap_range(struct kvm_gstage *gstage,

void kvm_riscv_gstage_wp_range(struct kvm_gstage *gstage, gpa_t start, gpa_t end);

+void kvm_riscv_gstage_update_hgatp(phys_addr_t pgd_phys, unsigned long vmid);
+
void kvm_riscv_gstage_mode_detect(void);

#endif
diff --git a/arch/riscv/include/asm/kvm_mmu.h b/arch/riscv/include/asm/kvm_mmu.h
index 5439e76f0a96..cc5994ec2805 100644
--- a/arch/riscv/include/asm/kvm_mmu.h
+++ b/arch/riscv/include/asm/kvm_mmu.h
@@ -16,6 +16,6 @@ int kvm_riscv_mmu_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
struct kvm_gstage_mapping *out_map);
int kvm_riscv_mmu_alloc_pgd(struct kvm *kvm);
void kvm_riscv_mmu_free_pgd(struct kvm *kvm);
-void kvm_riscv_mmu_update_hgatp(struct kvm_vcpu *vcpu);
+void kvm_riscv_mmu_update_hgatp(struct kvm_vcpu *vcpu, bool nested_virt);

#endif
diff --git a/arch/riscv/kvm/gstage.c b/arch/riscv/kvm/gstage.c
index b67d60d722c2..7834e1178b68 100644
--- a/arch/riscv/kvm/gstage.c
+++ b/arch/riscv/kvm/gstage.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/pgtable.h>
#include <asm/kvm_gstage.h>
+#include <asm/kvm_nacl.h>

#ifdef CONFIG_64BIT
unsigned long kvm_riscv_gstage_mode __ro_after_init = HGATP_MODE_SV39X4;
@@ -313,6 +314,19 @@ void kvm_riscv_gstage_wp_range(struct kvm_gstage *gstage, gpa_t start, gpa_t end
}
}

+void kvm_riscv_gstage_update_hgatp(phys_addr_t pgd_phys, unsigned long vmid)
+{
+ unsigned long hgatp = kvm_riscv_gstage_mode << HGATP_MODE_SHIFT;
+
+ hgatp |= (vmid << HGATP_VMID_SHIFT) & HGATP_VMID;
+ hgatp |= (pgd_phys >> PAGE_SHIFT) & HGATP_PPN;
+
+ ncsr_write(CSR_HGATP, hgatp);
+
+ if (!kvm_riscv_gstage_vmid_bits())
+ kvm_riscv_local_hfence_gvma_all();
+}
+
void __init kvm_riscv_gstage_mode_detect(void)
{
#ifdef CONFIG_64BIT
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 0b75eb2a1820..250606f5aa41 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -14,7 +14,6 @@
#include <linux/kvm_host.h>
#include <linux/sched/signal.h>
#include <asm/kvm_mmu.h>
-#include <asm/kvm_nacl.h>

static void mmu_wp_memory_region(struct kvm *kvm, int slot)
{
@@ -597,16 +596,15 @@ void kvm_riscv_mmu_free_pgd(struct kvm *kvm)
free_pages((unsigned long)pgd, get_order(kvm_riscv_gstage_pgd_size));
}

-void kvm_riscv_mmu_update_hgatp(struct kvm_vcpu *vcpu)
+void kvm_riscv_mmu_update_hgatp(struct kvm_vcpu *vcpu, bool nested_virt)
{
- unsigned long hgatp = kvm_riscv_gstage_mode << HGATP_MODE_SHIFT;
+ struct kvm_vcpu_nested_swtlb *nst = &vcpu->arch.nested.swtlb;
struct kvm_arch *k = &vcpu->kvm->arch;
+ unsigned long vmid = READ_ONCE(k->vmid.vmid);

- hgatp |= (READ_ONCE(k->vmid.vmid) << HGATP_VMID_SHIFT) & HGATP_VMID;
- hgatp |= (k->pgd_phys >> PAGE_SHIFT) & HGATP_PPN;
-
- ncsr_write(CSR_HGATP, hgatp);
-
- if (!kvm_riscv_gstage_vmid_bits())
- kvm_riscv_local_hfence_gvma_all();
+ if (nested_virt)
+ kvm_riscv_gstage_update_hgatp(nst->shadow_pgd_phys,
+ kvm_riscv_gstage_nested_vmid(vmid));
+ else
+ kvm_riscv_gstage_update_hgatp(k->pgd_phys, vmid);
}
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 859c8e71df65..178a4409d4e9 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -585,7 +585,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
csr_write(CSR_VSATP, csr->vsatp);
}

- kvm_riscv_mmu_update_hgatp(vcpu);
+ kvm_riscv_mmu_update_hgatp(vcpu, kvm_riscv_vcpu_nested_virt(vcpu));

kvm_riscv_vcpu_timer_restore(vcpu);

@@ -677,7 +677,7 @@ static int kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
kvm_riscv_reset_vcpu(vcpu, true);

if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
- kvm_riscv_mmu_update_hgatp(vcpu);
+ kvm_riscv_mmu_update_hgatp(vcpu, kvm_riscv_vcpu_nested_virt(vcpu));

if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
kvm_riscv_fence_i_process(vcpu);
--
2.43.0