[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CAAhSdy1RSpVCUzD+Aqbhh7aiQPmC2zdvuQfuOsmYNJrF3HxCsA@mail.gmail.com>
Date: Mon, 28 Apr 2025 17:46:01 +0530
From: Anup Patel <anup@...infault.org>
To: Radim Krčmář <rkrcmar@...tanamicro.com>
Cc: kvm-riscv@...ts.infradead.org, kvm@...r.kernel.org,
linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org,
Atish Patra <atishp@...shpatra.org>, Paul Walmsley <paul.walmsley@...ive.com>,
Palmer Dabbelt <palmer@...belt.com>, Albert Ou <aou@...s.berkeley.edu>,
Alexandre Ghiti <alex@...ti.fr>, Andrew Jones <ajones@...tanamicro.com>,
Mayuresh Chitale <mchitale@...tanamicro.com>
Subject: Re: [PATCH 3/5] KVM: RISC-V: remove unnecessary SBI reset state
On Thu, Apr 3, 2025 at 5:02 PM Radim Krčmář <rkrcmar@...tanamicro.com> wrote:
>
> The SBI reset state has only two variables -- pc and a1.
> The rest is known, so keep only the necessary information.
>
> The reset structures make sense if we want userspace to control the
> reset state (which we do), but I'd still remove them now and reintroduce
> with the userspace interface later -- we could probably have just a
> single reset state per VM, instead of a reset state for each VCPU.
The SBI spec does not define the reset state of CPUs. The SBI
implementations (aka KVM RISC-V or OpenSBI) or platform
firmwares are free to clear additional registers as part of system
reset or CPU reset.
As part of resetting the VCPU, the in-kernel KVM clears all
the registers.
The setting of PC, A0, and A1 is only an entry condition defined
for CPUs brought up using SBI HSM start or SBI System suspend.
We should not go ahead with this patch.
Regards,
Anup
>
> Signed-off-by: Radim Krčmář <rkrcmar@...tanamicro.com>
> ---
> arch/riscv/include/asm/kvm_aia.h | 3 --
> arch/riscv/include/asm/kvm_host.h | 12 ++++---
> arch/riscv/kvm/aia_device.c | 4 +--
> arch/riscv/kvm/vcpu.c | 58 +++++++++++++++++--------------
> arch/riscv/kvm/vcpu_sbi.c | 9 +++--
> 5 files changed, 44 insertions(+), 42 deletions(-)
>
> diff --git a/arch/riscv/include/asm/kvm_aia.h b/arch/riscv/include/asm/kvm_aia.h
> index 1f37b600ca47..3b643b9efc07 100644
> --- a/arch/riscv/include/asm/kvm_aia.h
> +++ b/arch/riscv/include/asm/kvm_aia.h
> @@ -63,9 +63,6 @@ struct kvm_vcpu_aia {
> /* CPU AIA CSR context of Guest VCPU */
> struct kvm_vcpu_aia_csr guest_csr;
>
> - /* CPU AIA CSR context upon Guest VCPU reset */
> - struct kvm_vcpu_aia_csr guest_reset_csr;
> -
> /* Guest physical address of IMSIC for this VCPU */
> gpa_t imsic_addr;
>
> diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
> index 0e9c2fab6378..0c8c9c05af91 100644
> --- a/arch/riscv/include/asm/kvm_host.h
> +++ b/arch/riscv/include/asm/kvm_host.h
> @@ -193,6 +193,12 @@ struct kvm_vcpu_smstateen_csr {
> unsigned long sstateen0;
> };
>
> +struct kvm_vcpu_reset_state {
> + spinlock_t lock;
> + unsigned long pc;
> + unsigned long a1;
> +};
> +
> struct kvm_vcpu_arch {
> /* VCPU ran at least once */
> bool ran_atleast_once;
> @@ -227,12 +233,8 @@ struct kvm_vcpu_arch {
> /* CPU Smstateen CSR context of Guest VCPU */
> struct kvm_vcpu_smstateen_csr smstateen_csr;
>
> - /* CPU context upon Guest VCPU reset */
> - struct kvm_cpu_context guest_reset_context;
> - spinlock_t reset_cntx_lock;
> + struct kvm_vcpu_reset_state reset_state;
>
> - /* CPU CSR context upon Guest VCPU reset */
> - struct kvm_vcpu_csr guest_reset_csr;
>
> /*
> * VCPU interrupts
> diff --git a/arch/riscv/kvm/aia_device.c b/arch/riscv/kvm/aia_device.c
> index 39cd26af5a69..43e472ff3e1a 100644
> --- a/arch/riscv/kvm/aia_device.c
> +++ b/arch/riscv/kvm/aia_device.c
> @@ -526,12 +526,10 @@ int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
> void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
> {
> struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
> - struct kvm_vcpu_aia_csr *reset_csr =
> - &vcpu->arch.aia_context.guest_reset_csr;
>
> if (!kvm_riscv_aia_available())
> return;
> - memcpy(csr, reset_csr, sizeof(*csr));
> + memset(csr, 0, sizeof(*csr));
>
> /* Proceed only if AIA was initialized successfully */
> if (!kvm_riscv_aia_initialized(vcpu->kvm))
> diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
> index 2fb75288ecfe..b8485c1c1ce4 100644
> --- a/arch/riscv/kvm/vcpu.c
> +++ b/arch/riscv/kvm/vcpu.c
> @@ -51,13 +51,40 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
> sizeof(kvm_vcpu_stats_desc),
> };
>
> -static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
> +static void kvm_riscv_vcpu_context_reset(struct kvm_vcpu *vcpu)
> {
> struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
> - struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
> struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
> - struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
> + struct kvm_vcpu_reset_state *reset_state = &vcpu->arch.reset_state;
> void *vector_datap = cntx->vector.datap;
> +
> + memset(cntx, 0, sizeof(*cntx));
> + memset(csr, 0, sizeof(*csr));
> +
> + /* Restore datap as it's not a part of the guest context. */
> + cntx->vector.datap = vector_datap;
> +
> + /* Load SBI reset values */
> + cntx->a0 = vcpu->vcpu_id;
> +
> + spin_lock(&reset_state->lock);
> + cntx->sepc = reset_state->pc;
> + cntx->a1 = reset_state->a1;
> + spin_unlock(&reset_state->lock);
> +
> + /* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
> + cntx->sstatus = SR_SPP | SR_SPIE;
> +
> + cntx->hstatus |= HSTATUS_VTW;
> + cntx->hstatus |= HSTATUS_SPVP;
> + cntx->hstatus |= HSTATUS_SPV;
> +
> + /* By default, make CY, TM, and IR counters accessible in VU mode */
> + csr->scounteren = 0x7;
> +}
> +
> +static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
> +{
> bool loaded;
>
> /**
> @@ -72,16 +99,10 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
>
> vcpu->arch.last_exit_cpu = -1;
>
> - memcpy(csr, reset_csr, sizeof(*csr));
> -
> - spin_lock(&vcpu->arch.reset_cntx_lock);
> - memcpy(cntx, reset_cntx, sizeof(*cntx));
> - spin_unlock(&vcpu->arch.reset_cntx_lock);
> + kvm_riscv_vcpu_context_reset(vcpu);
>
> kvm_riscv_vcpu_fp_reset(vcpu);
>
> - /* Restore datap as it's not a part of the guest context. */
> - cntx->vector.datap = vector_datap;
> kvm_riscv_vcpu_vector_reset(vcpu);
>
> kvm_riscv_vcpu_timer_reset(vcpu);
> @@ -113,8 +134,6 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
> int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
> {
> int rc;
> - struct kvm_cpu_context *cntx;
> - struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
>
> spin_lock_init(&vcpu->arch.mp_state_lock);
>
> @@ -134,24 +153,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
> /* Setup VCPU hfence queue */
> spin_lock_init(&vcpu->arch.hfence_lock);
>
> - /* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
> - spin_lock_init(&vcpu->arch.reset_cntx_lock);
> -
> - spin_lock(&vcpu->arch.reset_cntx_lock);
> - cntx = &vcpu->arch.guest_reset_context;
> - cntx->sstatus = SR_SPP | SR_SPIE;
> - cntx->hstatus = 0;
> - cntx->hstatus |= HSTATUS_VTW;
> - cntx->hstatus |= HSTATUS_SPVP;
> - cntx->hstatus |= HSTATUS_SPV;
> - spin_unlock(&vcpu->arch.reset_cntx_lock);
> + spin_lock_init(&vcpu->arch.reset_state.lock);
>
> if (kvm_riscv_vcpu_alloc_vector_context(vcpu))
> return -ENOMEM;
>
> - /* By default, make CY, TM, and IR counters accessible in VU mode */
> - reset_csr->scounteren = 0x7;
> -
> /* Setup VCPU timer */
> kvm_riscv_vcpu_timer_init(vcpu);
>
> diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
> index f58368f7df1d..3d7955e05cc3 100644
> --- a/arch/riscv/kvm/vcpu_sbi.c
> +++ b/arch/riscv/kvm/vcpu_sbi.c
> @@ -159,11 +159,10 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
> void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
> unsigned long pc, unsigned long a1)
> {
> - spin_lock(&vcpu->arch.reset_cntx_lock);
> - vcpu->arch.guest_reset_context.sepc = pc;
> - vcpu->arch.guest_reset_context.a0 = vcpu->vcpu_id;
> - vcpu->arch.guest_reset_context.a1 = a1;
> - spin_unlock(&vcpu->arch.reset_cntx_lock);
> + spin_lock(&vcpu->arch.reset_state.lock);
> + vcpu->arch.reset_state.pc = pc;
> + vcpu->arch.reset_state.a1 = a1;
> + spin_unlock(&vcpu->arch.reset_state.lock);
>
> kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
> }
> --
> 2.48.1
>
Powered by blists - more mailing lists