Message-ID: <20260120080013.2153519-19-anup.patel@oss.qualcomm.com>
Date: Tue, 20 Jan 2026 13:30:04 +0530
From: Anup Patel <anup.patel@....qualcomm.com>
To: Paolo Bonzini <pbonzini@...hat.com>, Atish Patra <atish.patra@...ux.dev>
Cc: Palmer Dabbelt <palmer@...belt.com>, Paul Walmsley <pjw@...nel.org>,
        Alexandre Ghiti <alex@...ti.fr>, Shuah Khan <shuah@...nel.org>,
        Anup Patel <anup@...infault.org>,
        Andrew Jones <andrew.jones@....qualcomm.com>,
        kvm-riscv@...ts.infradead.org, kvm@...r.kernel.org,
        linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org,
        linux-kselftest@...r.kernel.org,
        Anup Patel <anup.patel@....qualcomm.com>
Subject: [PATCH 18/27] RISC-V: KVM: Trap-n-emulate SRET for Guest HS-mode

The guest HS-mode (aka L1/guest hypervisor) can enter Guest VS/VU-mode
(aka L2/nested guest) using the SRET instruction. To support this, the
host hypervisor must trap-and-emulate the SRET instruction executed in
guest HS-mode (aka L1/guest hypervisor) using the host hstatus.VTSR bit.

Trapping every SRET instruction executed in guest HS-mode (aka L1/guest
hypervisor) would hurt performance, so the host hypervisor sets the host
hstatus.VTSR bit only after the guest sets its hstatus.SPV bit, and the
SRET emulation clears the host hstatus.VTSR bit upon entry into guest
VS/VU-mode.
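For reference, a minimal standalone sketch (not part of this patch) that
models the VTSR toggling described above: the host arms the SRET trap
when the guest writes hstatus.SPV, and the SRET emulation disarms it on
entry into the nested guest. The struct, helper names, and bit macros
below are illustrative only; the real HSTATUS_SPV/HSTATUS_VTSR
definitions live in arch/riscv/include/asm/csr.h.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative bit positions for the two hstatus fields involved. */
#define SPV	(1UL << 7)
#define VTSR	(1UL << 22)

struct model {
	unsigned long guest_hstatus;	/* virtual hstatus seen by L1 */
	unsigned long host_hstatus;	/* real hstatus programmed by L0 */
};

/* Models emulation of an L1 write to its (virtual) hstatus CSR:
 * arm the SRET trap (VTSR) only once the guest intends to enter
 * VS/VU-mode, i.e. once it sets hstatus.SPV. */
static void model_guest_hstatus_write(struct model *m, unsigned long val)
{
	m->guest_hstatus = val;
	if (val & SPV)
		m->host_hstatus |= VTSR;
}

/* Models emulation of a trapped SRET from guest HS-mode: if the
 * guest's SPV was set, enter the nested guest and drop VTSR so SRET
 * executed by the nested guest is no longer trapped by the host. */
static bool model_sret(struct model *m)
{
	bool enter_nested = m->guest_hstatus & SPV;

	if (enter_nested)
		m->host_hstatus &= ~VTSR;
	return enter_nested;
}

int main(void)
{
	struct model m = { 0, 0 };

	model_guest_hstatus_write(&m, SPV);
	printf("VTSR armed: %d\n", !!(m.host_hstatus & VTSR));
	printf("SRET enters nested guest: %d\n", model_sret(&m));
	printf("VTSR after SRET emulation: %d\n", !!(m.host_hstatus & VTSR));
	return 0;
}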

Signed-off-by: Anup Patel <anup.patel@....qualcomm.com>
---
 arch/riscv/include/asm/insn.h            |  3 ++
 arch/riscv/include/asm/kvm_vcpu_nested.h |  2 +
 arch/riscv/kvm/Makefile                  |  1 +
 arch/riscv/kvm/vcpu_insn.c               |  6 +++
 arch/riscv/kvm/vcpu_nested_insn.c        | 54 ++++++++++++++++++++++++
 5 files changed, 66 insertions(+)
 create mode 100644 arch/riscv/kvm/vcpu_nested_insn.c

diff --git a/arch/riscv/include/asm/insn.h b/arch/riscv/include/asm/insn.h
index c3005573e8c9..24a8abb3283c 100644
--- a/arch/riscv/include/asm/insn.h
+++ b/arch/riscv/include/asm/insn.h
@@ -331,6 +331,9 @@ static __always_inline bool riscv_insn_is_c_jalr(u32 code)
 #define INSN_OPCODE_SHIFT	2
 #define INSN_OPCODE_SYSTEM	28
 
+#define INSN_MASK_SRET		0xffffffff
+#define INSN_MATCH_SRET		0x10200073
+
 #define INSN_MASK_WFI		0xffffffff
 #define INSN_MATCH_WFI		0x10500073
 
diff --git a/arch/riscv/include/asm/kvm_vcpu_nested.h b/arch/riscv/include/asm/kvm_vcpu_nested.h
index 6d9d252a378c..665c60f09ee6 100644
--- a/arch/riscv/include/asm/kvm_vcpu_nested.h
+++ b/arch/riscv/include/asm/kvm_vcpu_nested.h
@@ -63,6 +63,8 @@ struct kvm_vcpu_nested {
 
 #define kvm_riscv_vcpu_nested_virt(__vcpu) ((__vcpu)->arch.nested.virt)
 
+int kvm_riscv_vcpu_nested_insn_sret(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn);
+
 int kvm_riscv_vcpu_nested_swtlb_xlate(struct kvm_vcpu *vcpu,
 				      const struct kvm_cpu_trap *trap,
 				      struct kvm_gstage_mapping *out_map,
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index a8806b69205f..c0534d4a469e 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -26,6 +26,7 @@ kvm-y += vcpu_exit.o
 kvm-y += vcpu_fp.o
 kvm-y += vcpu_insn.o
 kvm-y += vcpu_nested.o
+kvm-y += vcpu_nested_insn.o
 kvm-y += vcpu_nested_swtlb.o
 kvm-y += vcpu_onereg.o
 kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_pmu.o
diff --git a/arch/riscv/kvm/vcpu_insn.c b/arch/riscv/kvm/vcpu_insn.c
index 4d89b94128ae..745cd654df94 100644
--- a/arch/riscv/kvm/vcpu_insn.c
+++ b/arch/riscv/kvm/vcpu_insn.c
@@ -9,6 +9,7 @@
 
 #include <asm/cpufeature.h>
 #include <asm/insn.h>
+#include <asm/kvm_vcpu_nested.h>
 
 struct insn_func {
 	unsigned long mask;
@@ -257,6 +258,11 @@ static const struct insn_func system_opcode_funcs[] = {
 		.match = INSN_MATCH_CSRRCI,
 		.func  = csr_insn,
 	},
+	{
+		.mask  = INSN_MASK_SRET,
+		.match = INSN_MATCH_SRET,
+		.func  = kvm_riscv_vcpu_nested_insn_sret,
+	},
 	{
 		.mask  = INSN_MASK_WFI,
 		.match = INSN_MATCH_WFI,
diff --git a/arch/riscv/kvm/vcpu_nested_insn.c b/arch/riscv/kvm/vcpu_nested_insn.c
new file mode 100644
index 000000000000..8f5b2992dbb9
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_nested_insn.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2026 Qualcomm Technologies, Inc.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_nacl.h>
+#include <asm/kvm_vcpu_insn.h>
+
+int kvm_riscv_vcpu_nested_insn_sret(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
+{
+	unsigned long vsstatus, next_sepc, next_spp;
+	bool next_virt;
+
+	/*
+	 * Trap from virtual-VS/VU modes should be forwarded to
+	 * virtual-HS mode as a virtual instruction trap.
+	 */
+	if (kvm_riscv_vcpu_nested_virt(vcpu))
+		return KVM_INSN_VIRTUAL_TRAP;
+
+	/*
+	 * Trap from virtual-U mode should be forwarded to
+	 * virtual-HS mode as illegal instruction trap.
+	 */
+	if (!(vcpu->arch.guest_context.hstatus & HSTATUS_SPVP))
+		return KVM_INSN_ILLEGAL_TRAP;
+
+	vsstatus = ncsr_read(CSR_VSSTATUS);
+
+	/*
+	 * Find next nested virtualization mode, next privilege mode,
+	 * and next sepc
+	 */
+	next_virt = (vcpu->arch.nested.csr.hstatus & HSTATUS_SPV) ? true : false;
+	next_sepc = ncsr_read(CSR_VSEPC);
+	next_spp = vsstatus & SR_SPP;
+
+	/* Update Guest sstatus.sie */
+	vsstatus &= ~SR_SIE;
+	vsstatus |= (vsstatus & SR_SPIE) ? SR_SIE : 0;
+	ncsr_write(CSR_VSSTATUS, vsstatus);
+
+	/* Update return address and return privilege mode */
+	vcpu->arch.guest_context.sepc = next_sepc;
+	vcpu->arch.guest_context.sstatus &= ~SR_SPP;
+	vcpu->arch.guest_context.sstatus |= next_spp;
+
+	/* Set nested virtualization state based on guest hstatus.SPV */
+	kvm_riscv_vcpu_nested_set_virt(vcpu, NESTED_SET_VIRT_EVENT_SRET,
+				       next_virt, false, false);
+
+	return KVM_INSN_CONTINUE_SAME_SEPC;
+}
-- 
2.43.0

