Message-ID: <d144652e-898b-bf6b-dc73-352fb1fffd40@amazon.com>
Date: Mon, 23 Sep 2019 09:01:36 +0200
From: Alexander Graf <graf@...zon.com>
To: Anup Patel <Anup.Patel@....com>,
Palmer Dabbelt <palmer@...ive.com>,
"Paul Walmsley" <paul.walmsley@...ive.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Radim K <rkrcmar@...hat.com>
CC: Daniel Lezcano <daniel.lezcano@...aro.org>,
Thomas Gleixner <tglx@...utronix.de>,
Atish Patra <Atish.Patra@....com>,
Alistair Francis <Alistair.Francis@....com>,
Damien Le Moal <Damien.LeMoal@....com>,
"Christoph Hellwig" <hch@...radead.org>,
Anup Patel <anup@...infault.org>,
"kvm@...r.kernel.org" <kvm@...r.kernel.org>,
"linux-riscv@...ts.infradead.org" <linux-riscv@...ts.infradead.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v7 18/21] RISC-V: KVM: Add SBI v0.1 support
On 04.09.19 18:16, Anup Patel wrote:
> From: Atish Patra <atish.patra@....com>
>
> The KVM host kernel running in HS-mode needs to handle SBI calls coming
> from guest kernel running in VS-mode.
>
> This patch adds SBI v0.1 support in KVM RISC-V. All the SBI calls are
> implemented correctly except remote tlb flushes. For remote TLB flushes,
> we are doing full TLB flush and this will be optimized in future.
>
> Signed-off-by: Atish Patra <atish.patra@....com>
> Signed-off-by: Anup Patel <anup.patel@....com>
> Acked-by: Paolo Bonzini <pbonzini@...hat.com>
> Reviewed-by: Paolo Bonzini <pbonzini@...hat.com>
> ---
> arch/riscv/include/asm/kvm_host.h | 2 +
> arch/riscv/kvm/Makefile | 2 +-
> arch/riscv/kvm/vcpu_exit.c | 3 +
> arch/riscv/kvm/vcpu_sbi.c | 104 ++++++++++++++++++++++++++++++
> 4 files changed, 110 insertions(+), 1 deletion(-)
> create mode 100644 arch/riscv/kvm/vcpu_sbi.c
>
> diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
> index 928c67828b1b..269bfa5641b1 100644
> --- a/arch/riscv/include/asm/kvm_host.h
> +++ b/arch/riscv/include/asm/kvm_host.h
> @@ -250,4 +250,6 @@ bool kvm_riscv_vcpu_has_interrupt(struct kvm_vcpu *vcpu);
> void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
> void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
>
> +int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu);
> +
> #endif /* __RISCV_KVM_HOST_H__ */
> diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
> index 3e0c7558320d..b56dc1650d2c 100644
> --- a/arch/riscv/kvm/Makefile
> +++ b/arch/riscv/kvm/Makefile
> @@ -9,6 +9,6 @@ ccflags-y := -Ivirt/kvm -Iarch/riscv/kvm
> kvm-objs := $(common-objs-y)
>
> kvm-objs += main.o vm.o vmid.o tlb.o mmu.o
> -kvm-objs += vcpu.o vcpu_exit.o vcpu_switch.o vcpu_timer.o
> +kvm-objs += vcpu.o vcpu_exit.o vcpu_switch.o vcpu_timer.o vcpu_sbi.o
>
> obj-$(CONFIG_KVM) += kvm.o
> diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
> index 39469f67b241..0ee4e8943f4f 100644
> --- a/arch/riscv/kvm/vcpu_exit.c
> +++ b/arch/riscv/kvm/vcpu_exit.c
> @@ -594,6 +594,9 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
> (vcpu->arch.guest_context.hstatus & HSTATUS_STL))
> ret = stage2_page_fault(vcpu, run, scause, stval);
> break;
> + case EXC_SUPERVISOR_SYSCALL:
> + if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
> + ret = kvm_riscv_vcpu_sbi_ecall(vcpu);
This is an implicit fall-through into the default case - a break (or an
explicit fall-through annotation) seems to be missing here.
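Something like this is presumably what you want (just adding the missing
break to the hunk above):

	case EXC_SUPERVISOR_SYSCALL:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
			ret = kvm_riscv_vcpu_sbi_ecall(vcpu);
		break;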
> default:
> break;
> };
> diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
> new file mode 100644
> index 000000000000..b415b8b54bb1
> --- /dev/null
> +++ b/arch/riscv/kvm/vcpu_sbi.c
> @@ -0,0 +1,104 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/**
> + * Copyright (c) 2019 Western Digital Corporation or its affiliates.
> + *
> + * Authors:
> + * Atish Patra <atish.patra@....com>
> + */
> +
> +#include <linux/errno.h>
> +#include <linux/err.h>
> +#include <linux/kvm_host.h>
> +#include <asm/csr.h>
> +#include <asm/kvm_vcpu_timer.h>
> +
> +#define SBI_VERSION_MAJOR 0
> +#define SBI_VERSION_MINOR 1
> +
> +static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu, u32 type)
> +{
> + int i;
> + struct kvm_vcpu *tmp;
> +
> + kvm_for_each_vcpu(i, tmp, vcpu->kvm)
> + tmp->arch.power_off = true;
> + kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
> +
> + memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
> + vcpu->run->system_event.type = type;
> + vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
Is there a particular reason this has to be implemented in kernel space?
It's not performance critical, and stopping all the vcpus is something user
space should be able to do just as well, no?
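Roughly what I have in mind on the user space side (the KVM_* names are the
existing API, vcpu_run_loop()/vmm_stop_all_vcpus() are just placeholders):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* sketch of a VMM vcpu thread; run is the mmap'ed kvm_run region */
	static void vcpu_run_loop(int vcpu_fd, struct kvm_run *run)
	{
		for (;;) {
			if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
				break;

			switch (run->exit_reason) {
			case KVM_EXIT_SYSTEM_EVENT:
				if (run->system_event.type == KVM_SYSTEM_EVENT_SHUTDOWN) {
					/* user space parks the other vcpu threads itself */
					vmm_stop_all_vcpus();	/* placeholder */
					return;
				}
				break;
			default:
				break;
			}
		}
	}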
> +}
> +
> +int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu)
> +{
> + int i, ret = 1;
> + u64 next_cycle;
> + struct kvm_vcpu *rvcpu;
> + bool next_sepc = true;
> + ulong hmask, ut_scause = 0;
> + struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
> +
> + if (!cp)
> + return -EINVAL;
> +
> + switch (cp->a7) {
> + case SBI_SET_TIMER:
> +#if __riscv_xlen == 32
> + next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
> +#else
> + next_cycle = (u64)cp->a0;
> +#endif
> + kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
> + break;
> + case SBI_CLEAR_IPI:
> + kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_S_SOFT);
> + break;
> + case SBI_SEND_IPI:
> + hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
> + &ut_scause);
> + if (ut_scause) {
> + kvm_riscv_vcpu_trap_redirect(vcpu, ut_scause,
> + cp->a0);
> + next_sepc = false;
> + } else {
> + for_each_set_bit(i, &hmask, BITS_PER_LONG) {
> + rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
> + kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_S_SOFT);
> + }
> + }
> + break;
> + case SBI_SHUTDOWN:
> + kvm_sbi_system_shutdown(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
> + ret = 0;
> + break;
> + case SBI_REMOTE_FENCE_I:
> + sbi_remote_fence_i(NULL);
> + break;
> + /*
> + * TODO: There should be a way to call remote hfence.bvma.
> + * Preferred method is now a SBI call. Until then, just flush
> + * all tlbs.
> + */
> + case SBI_REMOTE_SFENCE_VMA:
> + /* TODO: Parse vma range. */
> + sbi_remote_sfence_vma(NULL, 0, 0);
> + break;
> + case SBI_REMOTE_SFENCE_VMA_ASID:
> + /* TODO: Parse vma range for given ASID */
> + sbi_remote_sfence_vma(NULL, 0, 0);
> + break;
> + default:
> + /*
> + * For now, just return error to Guest.
> + * TODO: In-future, we will route unsupported SBI calls
> + * to user-space.
> + */
> + cp->a0 = -ENOTSUPP;
> + break;
> + };
> +
> + if (ret >= 0)
> + cp->sepc += 4;
I don't see ret ever getting set to anything other than 1 except in the
shutdown case, so this check looks like it's always true?
Really, now is the time to plumb SBI calls down to user space. It allows
you to have a clean shutdown story from day 1.
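Very roughly, the default case of the ecall handler could then exit to user
space instead of failing the call. The exit reason and the riscv_sbi fields
below are made-up names, purely to illustrate the idea:

	default:
		/* hypothetical: forward unhandled SBI calls to user space */
		vcpu->run->exit_reason = KVM_EXIT_RISCV_SBI;	/* not a real exit reason (yet) */
		vcpu->run->riscv_sbi.extension_id = cp->a7;	/* made-up kvm_run field */
		vcpu->run->riscv_sbi.args[0] = cp->a0;
		vcpu->run->riscv_sbi.args[1] = cp->a1;
		ret = 0;	/* drop to user space */
		break;

User space would then implement (or reject) the call, write the result back
into a0 and resume the vcpu. That also gives you the shutdown path for free.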
Alex
Amazon Development Center Germany GmbH
Krausenstr. 38
10117 Berlin
Geschaeftsfuehrung: Christian Schlaeger, Ralf Herbrich
Eingetragen am Amtsgericht Charlottenburg unter HRB 149173 B
Sitz: Berlin
Ust-ID: DE 289 237 879