Message-Id: <20230419221716.3603068-37-atishp@rivosinc.com>
Date: Wed, 19 Apr 2023 15:17:04 -0700
From: Atish Patra <atishp@...osinc.com>
To: linux-kernel@...r.kernel.org
Cc: Rajnesh Kanwal <rkanwal@...osinc.com>,
Atish Patra <atishp@...osinc.com>,
Alexandre Ghiti <alex@...ti.fr>,
Andrew Jones <ajones@...tanamicro.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Anup Patel <anup@...infault.org>,
Atish Patra <atishp@...shpatra.org>,
Björn Töpel <bjorn@...osinc.com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Will Deacon <will@...nel.org>, Marc Zyngier <maz@...nel.org>,
Sean Christopherson <seanjc@...gle.com>,
linux-coco@...ts.linux.dev, Dylan Reid <dylan@...osinc.com>,
abrestic@...osinc.com, Samuel Ortiz <sameo@...osinc.com>,
Christoph Hellwig <hch@...radead.org>,
Conor Dooley <conor.dooley@...rochip.com>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Guo Ren <guoren@...nel.org>, Heiko Stuebner <heiko@...ech.de>,
Jiri Slaby <jirislaby@...nel.org>,
kvm-riscv@...ts.infradead.org, kvm@...r.kernel.org,
linux-mm@...ck.org, linux-riscv@...ts.infradead.org,
Mayuresh Chitale <mchitale@...tanamicro.com>,
Palmer Dabbelt <palmer@...belt.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Paul Walmsley <paul.walmsley@...ive.com>,
Uladzislau Rezki <urezki@...il.com>
Subject: [RFC 36/48] RISC-V: KVM: Read/write gprs from/to shmem in case of TVM VCPU.
From: Rajnesh Kanwal <rkanwal@...osinc.com>
For TVM VCPUs, the TSM uses shared memory to expose the GPRs of the
trusted VCPU. This change makes sure we use the shmem when doing MMIO
emulation for trusted VMs.
Signed-off-by: Rajnesh Kanwal <rkanwal@...osinc.com>
Signed-off-by: Atish Patra <atishp@...osinc.com>
---
arch/riscv/kvm/vcpu_insn.c | 98 +++++++++++++++++++++++++++++++++-----
1 file changed, 85 insertions(+), 13 deletions(-)
diff --git a/arch/riscv/kvm/vcpu_insn.c b/arch/riscv/kvm/vcpu_insn.c
index 331489f..56eeb86 100644
--- a/arch/riscv/kvm/vcpu_insn.c
+++ b/arch/riscv/kvm/vcpu_insn.c
@@ -7,6 +7,9 @@
#include <linux/bitops.h>
#include <linux/kvm_host.h>
#include <asm/kvm_cove.h>
+#include <asm/kvm_nacl.h>
+#include <asm/kvm_cove_sbi.h>
+#include <asm/asm-offsets.h>
#define INSN_OPCODE_MASK 0x007c
#define INSN_OPCODE_SHIFT 2
@@ -116,6 +119,10 @@
#define REG_OFFSET(insn, pos) \
(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
+#define REG_INDEX(insn, pos) \
+ ((SHIFT_RIGHT((insn), (pos)-LOG_REGBYTES) & REG_MASK) / \
+ (__riscv_xlen / 8))
+
#define REG_PTR(insn, pos, regs) \
((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))
@@ -600,6 +607,7 @@ int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
int len = 0, insn_len = 0;
struct kvm_cpu_trap utrap = { 0 };
struct kvm_cpu_context *ct = &vcpu->arch.guest_context;
+ void *nshmem;
/* Determine trapped instruction */
if (htinst & 0x1) {
@@ -627,7 +635,15 @@ int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
insn_len = INSN_LEN(insn);
}
- data = GET_RS2(insn, &vcpu->arch.guest_context);
+ if (is_cove_vcpu(vcpu)) {
+ nshmem = nacl_shmem();
+ data = nacl_shmem_gpr_read_cove(nshmem,
+ REG_INDEX(insn, SH_RS2) * 8 +
+ KVM_ARCH_GUEST_ZERO);
+ } else {
+ data = GET_RS2(insn, &vcpu->arch.guest_context);
+ }
+
data8 = data16 = data32 = data64 = data;
if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
@@ -643,19 +659,43 @@ int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
#ifdef CONFIG_64BIT
} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
len = 8;
- data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
+ if (is_cove_vcpu(vcpu)) {
+ data64 = nacl_shmem_gpr_read_cove(
+ nshmem,
+ RVC_RS2S(insn) * 8 + KVM_ARCH_GUEST_ZERO);
+ } else {
+ data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
+ }
} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
((insn >> SH_RD) & 0x1f)) {
len = 8;
- data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
+ if (is_cove_vcpu(vcpu)) {
+ data64 = nacl_shmem_gpr_read_cove(
+ nshmem, REG_INDEX(insn, SH_RS2C) * 8 +
+ KVM_ARCH_GUEST_ZERO);
+ } else {
+ data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
+ }
#endif
} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
len = 4;
- data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
+ if (is_cove_vcpu(vcpu)) {
+ data32 = nacl_shmem_gpr_read_cove(
+ nshmem,
+ RVC_RS2S(insn) * 8 + KVM_ARCH_GUEST_ZERO);
+ } else {
+ data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
+ }
} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
((insn >> SH_RD) & 0x1f)) {
len = 4;
- data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
+ if (is_cove_vcpu(vcpu)) {
+ data32 = nacl_shmem_gpr_read_cove(
+ nshmem, REG_INDEX(insn, SH_RS2C) * 8 +
+ KVM_ARCH_GUEST_ZERO);
+ } else {
+ data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
+ }
} else {
return -EOPNOTSUPP;
}
@@ -725,6 +765,7 @@ int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
u64 data64;
ulong insn;
int len, shift;
+ void *nshmem;
if (vcpu->arch.mmio_decode.return_handled)
return 0;
@@ -738,26 +779,57 @@ int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
len = vcpu->arch.mmio_decode.len;
shift = vcpu->arch.mmio_decode.shift;
+ if (is_cove_vcpu(vcpu))
+ nshmem = nacl_shmem();
+
switch (len) {
case 1:
data8 = *((u8 *)run->mmio.data);
- SET_RD(insn, &vcpu->arch.guest_context,
- (ulong)data8 << shift >> shift);
+ if (is_cove_vcpu(vcpu)) {
+ nacl_shmem_gpr_write_cove(nshmem,
+ REG_INDEX(insn, SH_RD) * 8 +
+ KVM_ARCH_GUEST_ZERO,
+ (unsigned long)data8);
+ } else {
+ SET_RD(insn, &vcpu->arch.guest_context,
+ (ulong)data8 << shift >> shift);
+ }
break;
case 2:
data16 = *((u16 *)run->mmio.data);
- SET_RD(insn, &vcpu->arch.guest_context,
- (ulong)data16 << shift >> shift);
+ if (is_cove_vcpu(vcpu)) {
+ nacl_shmem_gpr_write_cove(nshmem,
+ REG_INDEX(insn, SH_RD) * 8 +
+ KVM_ARCH_GUEST_ZERO,
+ (unsigned long)data16);
+ } else {
+ SET_RD(insn, &vcpu->arch.guest_context,
+ (ulong)data16 << shift >> shift);
+ }
break;
case 4:
data32 = *((u32 *)run->mmio.data);
- SET_RD(insn, &vcpu->arch.guest_context,
- (ulong)data32 << shift >> shift);
+ if (is_cove_vcpu(vcpu)) {
+ nacl_shmem_gpr_write_cove(nshmem,
+ REG_INDEX(insn, SH_RD) * 8 +
+ KVM_ARCH_GUEST_ZERO,
+ (unsigned long)data32);
+ } else {
+ SET_RD(insn, &vcpu->arch.guest_context,
+ (ulong)data32 << shift >> shift);
+ }
break;
case 8:
data64 = *((u64 *)run->mmio.data);
- SET_RD(insn, &vcpu->arch.guest_context,
- (ulong)data64 << shift >> shift);
+ if (is_cove_vcpu(vcpu)) {
+ nacl_shmem_gpr_write_cove(nshmem,
+ REG_INDEX(insn, SH_RD) * 8 +
+ KVM_ARCH_GUEST_ZERO,
+ (unsigned long)data64);
+ } else {
+ SET_RD(insn, &vcpu->arch.guest_context,
+ (ulong)data64 << shift >> shift);
+ }
break;
default:
return -EOPNOTSUPP;
--
2.25.1