Message-ID: <20260120080013.2153519-17-anup.patel@oss.qualcomm.com>
Date: Tue, 20 Jan 2026 13:30:02 +0530
From: Anup Patel <anup.patel@....qualcomm.com>
To: Paolo Bonzini <pbonzini@...hat.com>, Atish Patra <atish.patra@...ux.dev>
Cc: Palmer Dabbelt <palmer@...belt.com>, Paul Walmsley <pjw@...nel.org>,
Alexandre Ghiti <alex@...ti.fr>, Shuah Khan <shuah@...nel.org>,
Anup Patel <anup@...infault.org>,
Andrew Jones <andrew.jones@....qualcomm.com>,
kvm-riscv@...ts.infradead.org, kvm@...r.kernel.org,
linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-kselftest@...r.kernel.org,
Anup Patel <anup.patel@....qualcomm.com>
Subject: [PATCH 16/27] RISC-V: KVM: Check and inject nested virtual interrupts
When entering the guest in virtual-VS/VU mode (i.e. a nested guest),
check for and inject a nested virtual interrupt right before guest
entry.
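
For reference, the interrupt selection done by the new
kvm_riscv_vcpu_nested_vsirq_process() boils down to the standalone
sketch below. The constants are spelled out with the values from the
RISC-V privileged spec rather than taken from kernel headers, and
pick_nested_vsirq() is a hypothetical helper used only for this
illustration, not part of the patch:

  /* Illustrative sketch only; mirrors the selection logic in the patch. */
  #define BIT(n)              (1UL << (n))
  #define IRQ_S_SOFT          1
  #define IRQ_VS_SOFT         2
  #define IRQ_S_TIMER         5
  #define IRQ_VS_TIMER        6
  #define IRQ_S_EXT           9
  #define IRQ_VS_EXT          10
  #define VSIP_TO_HVIP_SHIFT  (IRQ_VS_SOFT - IRQ_S_SOFT)

  /* Return the highest-priority virtual-VS interrupt number, or 0 if none. */
  static int pick_nested_vsirq(unsigned long hvip, unsigned long vsie,
                               unsigned long hideleg)
  {
          /* Pending, enabled, and delegated to virtual-VS mode */
          unsigned long irqs = hvip & (vsie << VSIP_TO_HVIP_SHIFT) & hideleg;

          if (irqs & BIT(IRQ_VS_EXT))
                  return IRQ_S_EXT;
          if (irqs & BIT(IRQ_VS_TIMER))
                  return IRQ_S_TIMER;
          if (irqs & BIT(IRQ_VS_SOFT))
                  return IRQ_S_SOFT;
          return 0;
  }

If a non-zero interrupt number is picked, it is redirected to the
nested guest as a supervisor-mode trap, unless the guest resumes in
virtual-VS mode with interrupts disabled in vsstatus.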
Signed-off-by: Anup Patel <anup.patel@....qualcomm.com>
---
arch/riscv/include/asm/kvm_vcpu_nested.h | 1 +
arch/riscv/kvm/vcpu.c | 3 ++
arch/riscv/kvm/vcpu_nested.c | 49 ++++++++++++++++++++++++
3 files changed, 53 insertions(+)
diff --git a/arch/riscv/include/asm/kvm_vcpu_nested.h b/arch/riscv/include/asm/kvm_vcpu_nested.h
index 6bfb67702610..6d9d252a378c 100644
--- a/arch/riscv/include/asm/kvm_vcpu_nested.h
+++ b/arch/riscv/include/asm/kvm_vcpu_nested.h
@@ -86,6 +86,7 @@ void kvm_riscv_vcpu_nested_set_virt(struct kvm_vcpu *vcpu,
void kvm_riscv_vcpu_nested_trap_redirect(struct kvm_vcpu *vcpu,
struct kvm_cpu_trap *trap,
bool prev_priv);
+void kvm_riscv_vcpu_nested_vsirq_process(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_nested_reset(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_nested_init(struct kvm_vcpu *vcpu);
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 077637aff9a2..f8c4344c2b1f 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -934,6 +934,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
kvm_riscv_local_tlb_sanitize(vcpu);
+ /* Check and inject nested virtual interrupts */
+ kvm_riscv_vcpu_nested_vsirq_process(vcpu);
+
trace_kvm_entry(vcpu);
guest_timing_enter_irqoff();
diff --git a/arch/riscv/kvm/vcpu_nested.c b/arch/riscv/kvm/vcpu_nested.c
index 214206fc28bb..9b2b3369a232 100644
--- a/arch/riscv/kvm/vcpu_nested.c
+++ b/arch/riscv/kvm/vcpu_nested.c
@@ -172,6 +172,55 @@ void kvm_riscv_vcpu_nested_trap_redirect(struct kvm_vcpu *vcpu,
false, prev_priv, gva);
}
+void kvm_riscv_vcpu_nested_vsirq_process(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_nested *ns = &vcpu->arch.nested;
+ struct kvm_vcpu_nested_csr *nsc = &ns->csr;
+ struct kvm_cpu_trap trap;
+ unsigned long irqs;
+ bool next_spp;
+ int vsirq;
+
+ /* Do nothing if nested virtualization is OFF */
+ if (!ns->virt)
+ return;
+
+ /* Determine the virtual-VS mode interrupt number */
+ vsirq = 0;
+ irqs = nsc->hvip;
+ irqs &= nsc->vsie << VSIP_TO_HVIP_SHIFT;
+ irqs &= nsc->hideleg;
+ if (irqs & BIT(IRQ_VS_EXT))
+ vsirq = IRQ_S_EXT;
+ else if (irqs & BIT(IRQ_VS_TIMER))
+ vsirq = IRQ_S_TIMER;
+ else if (irqs & BIT(IRQ_VS_SOFT))
+ vsirq = IRQ_S_SOFT;
+ if (vsirq <= 0)
+ return;
+
+ /*
+ * Determine whether we are resuming in virtual-VS mode
+ * or virtual-VU mode.
+ */
+ next_spp = !!(vcpu->arch.guest_context.sstatus & SR_SPP);
+
+ /*
+ * If we are going to virtual-VS mode and interrupts are
+ * disabled then do nothing.
+ */
+ if (next_spp && !(ncsr_read(CSR_VSSTATUS) & SR_SIE))
+ return;
+
+ /* Take virtual-VS mode interrupt */
+ trap.scause = CAUSE_IRQ_FLAG | vsirq;
+ trap.sepc = vcpu->arch.guest_context.sepc;
+ trap.stval = 0;
+ trap.htval = 0;
+ trap.htinst = 0;
+ kvm_riscv_vcpu_trap_smode_redirect(vcpu, &trap, next_spp);
+}
+
void kvm_riscv_vcpu_nested_reset(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_nested *ns = &vcpu->arch.nested;
--
2.43.0