Message-ID: <20190805134201.2814-18-anup.patel@wdc.com>
Date: Mon, 5 Aug 2019 13:44:23 +0000
From: Anup Patel <Anup.Patel@....com>
To: Palmer Dabbelt <palmer@...ive.com>,
Paul Walmsley <paul.walmsley@...ive.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Radim Krčmář <rkrcmar@...hat.com>
CC: Daniel Lezcano <daniel.lezcano@...aro.org>,
Thomas Gleixner <tglx@...utronix.de>,
Atish Patra <Atish.Patra@....com>,
Alistair Francis <Alistair.Francis@....com>,
Damien Le Moal <Damien.LeMoal@....com>,
Christoph Hellwig <hch@...radead.org>,
Anup Patel <anup@...infault.org>,
"kvm@...r.kernel.org" <kvm@...r.kernel.org>,
"linux-riscv@...ts.infradead.org" <linux-riscv@...ts.infradead.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
Anup Patel <Anup.Patel@....com>
Subject: [PATCH v3 17/19] RISC-V: KVM: Add SBI v0.1 support
From: Atish Patra <atish.patra@....com>

The KVM host kernel running in HS-mode needs to handle SBI calls coming
from the guest kernel running in VS-mode.

This patch adds SBI v0.1 support to KVM RISC-V. All SBI v0.1 calls are
handled, except that the remote TLB flush calls are currently serviced
by a full TLB flush; this will be optimized in the future.
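
For context, a guest-side sketch (not part of this patch): a VS-mode
guest issues an SBI v0.1 call with a plain ecall, passing the function
ID in a7 and arguments in a0/a1, with the return value in a0. The
helper name below is made up for illustration; SBI_SET_TIMER is
function ID 0 in the SBI v0.1 specification.

  /* Guest-side illustration only, not part of this patch. */
  static inline long guest_sbi_set_timer_v01(unsigned long stime_lo,
                                             unsigned long stime_hi)
  {
          register unsigned long a0 asm("a0") = stime_lo; /* low bits (RV32) */
          register unsigned long a1 asm("a1") = stime_hi; /* high bits (RV32) */
          register unsigned long a7 asm("a7") = 0;        /* SBI_SET_TIMER */

          /*
           * The ecall traps to HS-mode as EXC_SUPERVISOR_SYSCALL and is
           * dispatched by kvm_riscv_vcpu_sbi_ecall() added below.
           */
          asm volatile("ecall"
                       : "+r" (a0)
                       : "r" (a1), "r" (a7)
                       : "memory");

          return a0; /* a0 carries the SBI v0.1 return value */
  }
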
Signed-off-by: Atish Patra <atish.patra@....com>
Signed-off-by: Anup Patel <anup.patel@....com>
---
 arch/riscv/include/asm/kvm_host.h |   2 +
 arch/riscv/kvm/Makefile           |   2 +-
 arch/riscv/kvm/vcpu_exit.c        |   3 +
 arch/riscv/kvm/vcpu_sbi.c         | 119 ++++++++++++++++++++++++++++++
 4 files changed, 125 insertions(+), 1 deletion(-)
 create mode 100644 arch/riscv/kvm/vcpu_sbi.c

diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 983aea4f6049..9a673f18d772 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -238,4 +238,6 @@ bool kvm_riscv_vcpu_has_interrupt(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
 
+int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu);
+
 #endif /* __RISCV_KVM_HOST_H__ */
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index 3e0c7558320d..b56dc1650d2c 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -9,6 +9,6 @@ ccflags-y := -Ivirt/kvm -Iarch/riscv/kvm
 kvm-objs := $(common-objs-y)
 
 kvm-objs += main.o vm.o vmid.o tlb.o mmu.o
-kvm-objs += vcpu.o vcpu_exit.o vcpu_switch.o vcpu_timer.o
+kvm-objs += vcpu.o vcpu_exit.o vcpu_switch.o vcpu_timer.o vcpu_sbi.o
 
 obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index fbc04fe335ad..87b83fcf9a14 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -534,6 +534,9 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		    (vcpu->arch.guest_context.hstatus & HSTATUS_STL))
 			ret = stage2_page_fault(vcpu, run, scause, stval);
 		break;
+	case EXC_SUPERVISOR_SYSCALL:
+		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
+			ret = kvm_riscv_vcpu_sbi_ecall(vcpu);
 	default:
 		break;
 	};
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
new file mode 100644
index 000000000000..5793202eb514
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ *     Atish Patra <atish.patra@....com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <asm/csr.h>
+#include <asm/kvm_vcpu_timer.h>
+
+#define SBI_VERSION_MAJOR 0
+#define SBI_VERSION_MINOR 1
+
+/* TODO: Handle traps due to unpriv load and redirect them back to VS-mode */
+static unsigned long kvm_sbi_unpriv_load(const unsigned long *addr,
+					 struct kvm_vcpu *vcpu)
+{
+	unsigned long flags, val;
+	unsigned long __hstatus, __sstatus;
+
+	local_irq_save(flags);
+	__hstatus = csr_read(CSR_HSTATUS);
+	__sstatus = csr_read(CSR_SSTATUS);
+	csr_write(CSR_HSTATUS, vcpu->arch.guest_context.hstatus | HSTATUS_SPRV);
+	csr_write(CSR_SSTATUS, vcpu->arch.guest_context.sstatus);
+	val = *addr;
+	csr_write(CSR_HSTATUS, __hstatus);
+	csr_write(CSR_SSTATUS, __sstatus);
+	local_irq_restore(flags);
+
+	return val;
+}
+
+static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu, u32 type)
+{
+	int i;
+	struct kvm_vcpu *tmp;
+
+	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
+		tmp->arch.power_off = true;
+	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
+
+	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
+	vcpu->run->system_event.type = type;
+	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+}
+
+int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu)
+{
+	int ret = 1;
+	u64 next_cycle;
+	int vcpuid;
+	struct kvm_vcpu *remote_vcpu;
+	ulong dhart_mask;
+	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+
+	if (!cp)
+		return -EINVAL;
+	switch (cp->a7) {
+	case SBI_SET_TIMER:
+#if __riscv_xlen == 32
+		next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
+#else
+		next_cycle = (u64)cp->a0;
+#endif
+		kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
+		break;
+	case SBI_CONSOLE_PUTCHAR:
+		/* Not implemented */
+		cp->a0 = -ENOTSUPP;
+		break;
+	case SBI_CONSOLE_GETCHAR:
+		/* Not implemented */
+		cp->a0 = -ENOTSUPP;
+		break;
+	case SBI_CLEAR_IPI:
+		kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_S_SOFT);
+		break;
+	case SBI_SEND_IPI:
+		dhart_mask = kvm_sbi_unpriv_load((unsigned long *)cp->a0, vcpu);
+		for_each_set_bit(vcpuid, &dhart_mask, BITS_PER_LONG) {
+			remote_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, vcpuid);
+			kvm_riscv_vcpu_set_interrupt(remote_vcpu, IRQ_S_SOFT);
+		}
+		break;
+	case SBI_SHUTDOWN:
+		kvm_sbi_system_shutdown(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
+		ret = 0;
+		break;
+	case SBI_REMOTE_FENCE_I:
+		sbi_remote_fence_i(NULL);
+		break;
+	/*
+	 * TODO: There should be a way to call remote hfence.bvma.
+	 * The preferred method is an SBI call. Until then, just
+	 * flush all TLBs.
+	 */
+	case SBI_REMOTE_SFENCE_VMA:
+		/* TODO: Parse the vma range. */
+		sbi_remote_sfence_vma(NULL, 0, 0);
+		break;
+	case SBI_REMOTE_SFENCE_VMA_ASID:
+		/* TODO: Parse the vma range for the given ASID. */
+		sbi_remote_sfence_vma(NULL, 0, 0);
+		break;
+	default:
+		cp->a0 = -ENOTSUPP;
+		break;
+	};
+
+	if (ret >= 0)
+		cp->sepc += 4;
+
+	return ret;
+}
--
2.17.1