Message-ID: <CAOnJCUL0yOzKxi7BOrBp4Fax3TNT+j8LfZejHr0BC7=kP6vV6w@mail.gmail.com>
Date:   Thu, 21 Jul 2022 00:06:13 -0700
From:   Atish Patra <atishp@...shpatra.org>
To:     Anup Patel <anup@...infault.org>
Cc:     Atish Patra <atishp@...osinc.com>,
        "linux-kernel@...r.kernel.org List" <linux-kernel@...r.kernel.org>,
        Albert Ou <aou@...s.berkeley.edu>,
        Daniel Lezcano <daniel.lezcano@...aro.org>,
        Guo Ren <guoren@...nel.org>, Heiko Stuebner <heiko@...ech.de>,
        "open list:KERNEL VIRTUAL MACHINE FOR RISC-V (KVM/riscv)" 
        <kvm-riscv@...ts.infradead.org>, KVM General <kvm@...r.kernel.org>,
        linux-riscv <linux-riscv@...ts.infradead.org>,
        Liu Shaohua <liush@...winnertech.com>,
        Niklas Cassel <niklas.cassel@....com>,
        Palmer Dabbelt <palmer@...belt.com>,
        Paolo Bonzini <pbonzini@...hat.com>,
        Paul Walmsley <paul.walmsley@...ive.com>,
        Philipp Tomsich <philipp.tomsich@...ll.eu>,
        Thomas Gleixner <tglx@...utronix.de>,
        Tsukasa OI <research_trasio@....a4lg.com>,
        Wei Fu <wefu@...hat.com>
Subject: Re: [PATCH v5 4/4] RISC-V: KVM: Support sstc extension

On Wed, Jul 20, 2022 at 9:33 PM Anup Patel <anup@...infault.org> wrote:
>
> On Thu, Jul 21, 2022 at 12:53 AM Atish Patra <atishp@...osinc.com> wrote:
> >
> > Sstc extension allows the guest to program the vstimecmp CSR directly
> > instead of making an SBI call to the hypervisor to program the next
> > event. In this case, the timer interrupt is also injected directly
> > into the guest by the hardware. To maintain backward compatibility,
> > the hypervisor also updates vstimecmp during an SBI set_time call if
> > the hardware supports it. Thus, older guest kernels also take
> > advantage of the sstc extension.
> >
> > Reviewed-by: Anup Patel <anup@...infault.org>
> > Signed-off-by: Atish Patra <atishp@...osinc.com>
> > ---
> >  arch/riscv/include/asm/kvm_vcpu_timer.h |   7 ++
> >  arch/riscv/include/uapi/asm/kvm.h       |   1 +
> >  arch/riscv/kvm/vcpu.c                   |   7 +-
> >  arch/riscv/kvm/vcpu_timer.c             | 144 +++++++++++++++++++++++-
> >  4 files changed, 152 insertions(+), 7 deletions(-)
> >
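For reference, the backward-compatibility path mentioned in the commit
message flows through kvm_riscv_vcpu_timer_next_event() below: the SBI
set_time handler keeps calling it, and the function pointer installed at
init time decides whether that programs vstimecmp or arms the hrtimer.
A rough sketch of the caller side; the handler name and argument layout
here are illustrative, not part of this patch:

    static int sbi_ext_time_set_timer(struct kvm_vcpu *vcpu,
                                      struct kvm_cpu_context *cp)
    {
            u64 next_cycle;

    #if defined(CONFIG_32BIT)
            /* The 64-bit deadline arrives split across a0/a1 on rv32 */
            next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
    #else
            next_cycle = (u64)cp->a0;
    #endif
            /*
             * Dispatches via t->timer_next_event: with sstc this writes
             * vstimecmp directly, otherwise it arms the hrtimer.
             */
            return kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
    }
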
> > diff --git a/arch/riscv/include/asm/kvm_vcpu_timer.h b/arch/riscv/include/asm/kvm_vcpu_timer.h
> > index 50138e2eb91b..0d8fdb8ec63a 100644
> > --- a/arch/riscv/include/asm/kvm_vcpu_timer.h
> > +++ b/arch/riscv/include/asm/kvm_vcpu_timer.h
> > @@ -28,6 +28,11 @@ struct kvm_vcpu_timer {
> >         u64 next_cycles;
> >         /* Underlying hrtimer instance */
> >         struct hrtimer hrt;
> > +
> > +       /* Flag to track whether sstc is enabled */
> > +       bool sstc_enabled;
> > +       /* Function pointer to select between vstimecmp and hrtimer at runtime */
> > +       int (*timer_next_event)(struct kvm_vcpu *vcpu, u64 ncycles);
> >  };
> >
> >  int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles);
> > @@ -40,5 +45,7 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu);
> >  int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
> >  void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
> >  void kvm_riscv_guest_timer_init(struct kvm *kvm);
> > +void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu);
> > +bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu);
> >
> >  #endif
> > diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
> > index 24b2a6e27698..9ac3dbaf0b0f 100644
> > --- a/arch/riscv/include/uapi/asm/kvm.h
> > +++ b/arch/riscv/include/uapi/asm/kvm.h
> > @@ -96,6 +96,7 @@ enum KVM_RISCV_ISA_EXT_ID {
> >         KVM_RISCV_ISA_EXT_H,
> >         KVM_RISCV_ISA_EXT_I,
> >         KVM_RISCV_ISA_EXT_M,
> > +       KVM_RISCV_ISA_EXT_SSTC,
>
> Please don't add a new ISA ext register in-between the existing
> entries; new ones must be appended at the end to maintain UAPI
> compatibility.
>

Sure. As SVPBMT was not merged yet, I thought it would be good to keep
them in canonical order.
I will move it as per your suggestion in the next version.
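
Something like this, presumably, with SSTC simply appended after the
existing entries so their UAPI values stay stable:

    enum KVM_RISCV_ISA_EXT_ID {
            KVM_RISCV_ISA_EXT_A = 0,
            /* ... existing entries keep their values ... */
            KVM_RISCV_ISA_EXT_M,
            KVM_RISCV_ISA_EXT_SVPBMT,
            KVM_RISCV_ISA_EXT_SSTC,        /* appended, not inserted */
            KVM_RISCV_ISA_EXT_MAX,
    };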

> >         KVM_RISCV_ISA_EXT_SVPBMT,
> >         KVM_RISCV_ISA_EXT_MAX,
> >  };
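As a usage note: userspace can then query or toggle the extension per
vcpu through the ISA ext ONE_REG interface with this ID. A minimal
sketch, assuming the usual KVM_REG_RISCV_ISA_EXT encoding from the
uapi headers:

    /* Check whether SSTC is exposed to the guest (sketch) */
    unsigned long isa_ext = 0;
    struct kvm_one_reg reg = {
            .id = KVM_REG_RISCV | KVM_REG_SIZE_ULONG |
                  KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC,
            .addr = (unsigned long)&isa_ext,
    };

    if (!ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) && isa_ext)
            /* SSTC is enabled for this vcpu */;
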
> > diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
> > index 5d271b597613..9ee6ad376eb2 100644
> > --- a/arch/riscv/kvm/vcpu.c
> > +++ b/arch/riscv/kvm/vcpu.c
> > @@ -51,6 +51,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
> >         RISCV_ISA_EXT_h,
> >         RISCV_ISA_EXT_i,
> >         RISCV_ISA_EXT_m,
> > +       RISCV_ISA_EXT_SSTC,
>
> Move this to the end of the array, as per above.
>
> >         RISCV_ISA_EXT_SVPBMT,
> >  };
> >
> > @@ -203,7 +204,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
> >
> >  int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
> >  {
> > -       return kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER);
> > +       return kvm_riscv_vcpu_timer_pending(vcpu);
> >  }
> >
> >  void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
> > @@ -785,6 +786,8 @@ static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
> >         if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SVPBMT))
> >                 henvcfg |= ENVCFG_PBMTE;
> >
> > +       if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SSTC))
> > +               henvcfg |= ENVCFG_STCE;
> >         csr_write(CSR_HENVCFG, henvcfg);
> >  #ifdef CONFIG_32BIT
> >         csr_write(CSR_HENVCFGH, henvcfg >> 32);
> > @@ -828,6 +831,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
> >                                      vcpu->arch.isa);
> >         kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
> >
> > +       kvm_riscv_vcpu_timer_save(vcpu);
> > +
> >         csr->vsstatus = csr_read(CSR_VSSTATUS);
> >         csr->vsie = csr_read(CSR_VSIE);
> >         csr->vstvec = csr_read(CSR_VSTVEC);
> > diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c
> > index 595043857049..16f50c46ba39 100644
> > --- a/arch/riscv/kvm/vcpu_timer.c
> > +++ b/arch/riscv/kvm/vcpu_timer.c
> > @@ -69,7 +69,18 @@ static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
> >         return 0;
> >  }
> >
> > -int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
> > +static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
> > +{
> > +#if defined(CONFIG_32BIT)
> > +       csr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
> > +       csr_write(CSR_VSTIMECMPH, ncycles >> 32);
> > +#else
> > +       csr_write(CSR_VSTIMECMP, ncycles);
> > +#endif
> > +       return 0;
> > +}
> > +
> > +static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
> >  {
> >         struct kvm_vcpu_timer *t = &vcpu->arch.timer;
> >         struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
> > @@ -88,6 +99,65 @@ int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
> >         return 0;
> >  }
> >
> > +int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
> > +{
> > +       struct kvm_vcpu_timer *t = &vcpu->arch.timer;
> > +
> > +       return t->timer_next_event(vcpu, ncycles);
> > +}
> > +
> > +static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
> > +{
> > +       u64 delta_ns;
> > +       struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
> > +       struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
> > +       struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
> > +
> > +       if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
> > +               delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
> > +               hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
> > +               return HRTIMER_RESTART;
> > +       }
> > +
> > +       t->next_set = false;
> > +       kvm_vcpu_kick(vcpu);
> > +
> > +       return HRTIMER_NORESTART;
> > +}
> > +
> > +bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
> > +{
> > +       struct kvm_vcpu_timer *t = &vcpu->arch.timer;
> > +       struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
> > +
> > +       return !kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
> > +              kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER);
> > +}
> > +
> > +static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
> > +{
> > +       struct kvm_vcpu_timer *t = &vcpu->arch.timer;
> > +       struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
> > +       u64 delta_ns;
> > +
> > +       if (!t->init_done)
> > +               return;
> > +
> > +       delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
> > +       if (delta_ns) {
> > +               hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
> > +               t->next_set = true;
> > +       }
> > +}
> > +
> > +static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
> > +{
> > +       kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
> > +}
> > +
> >  int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
> >                                  const struct kvm_one_reg *reg)
> >  {
> > @@ -180,10 +250,20 @@ int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
> >                 return -EINVAL;
> >
> >         hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
> > -       t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
> >         t->init_done = true;
> >         t->next_set = false;
> >
> > +       /* Enable sstc for every vcpu if available in hardware */
> > +       if (riscv_isa_extension_available(NULL, SSTC)) {
> > +               t->sstc_enabled = true;
> > +               t->hrt.function = kvm_riscv_vcpu_vstimer_expired;
> > +               t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
> > +       } else {
> > +               t->sstc_enabled = false;
> > +               t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
> > +               t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
> > +       }
> > +
> >         return 0;
> >  }
> >
> > @@ -199,21 +279,73 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
> >
> >  int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
> >  {
> > +       struct kvm_vcpu_timer *t = &vcpu->arch.timer;
> > +
> > +       t->next_cycles = -1ULL;
> >         return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
> >  }
> >
> > -void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
> > +static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
> >  {
> >         struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
> >
> > -#ifdef CONFIG_64BIT
> > -       csr_write(CSR_HTIMEDELTA, gt->time_delta);
> > -#else
> > +#if defined(CONFIG_32BIT)
> >         csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
> >         csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
> > +#else
> > +       csr_write(CSR_HTIMEDELTA, gt->time_delta);
> >  #endif
> >  }
> >
> > +void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
> > +{
> > +       struct kvm_vcpu_timer *t = &vcpu->arch.timer;
> > +
> > +       kvm_riscv_vcpu_update_timedelta(vcpu);
> > +
> > +       if (!t->sstc_enabled)
> > +               return;
> > +
> > +#if defined(CONFIG_32BIT)
> > +       csr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
> > +       csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
> > +#else
> > +       csr_write(CSR_VSTIMECMP, t->next_cycles);
> > +#endif
> > +
> > +       /* timer must be initialized for the remaining operations */
> > +       if (unlikely(!t->init_done))
> > +               return;
> > +
> > +       kvm_riscv_vcpu_timer_unblocking(vcpu);
> > +}
> > +
> > +void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
> > +{
> > +       struct kvm_vcpu_timer *t = &vcpu->arch.timer;
> > +
> > +       if (!t->sstc_enabled)
> > +               return;
> > +
> > +#if defined(CONFIG_32BIT)
> > +       t->next_cycles = csr_read(CSR_VSTIMECMP);
> > +       t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
> > +#else
> > +       t->next_cycles = csr_read(CSR_VSTIMECMP);
> > +#endif
> > +       /* timer must be initialized for the remaining operations */
> > +       if (unlikely(!t->init_done))
> > +               return;
> > +
> > +       if (kvm_vcpu_is_blocking(vcpu))
> > +               kvm_riscv_vcpu_timer_blocking(vcpu);
> > +}
> > +
> >  void kvm_riscv_guest_timer_init(struct kvm *kvm)
> >  {
> >         struct kvm_guest_timer *gt = &kvm->arch.timer;
> > --
> > 2.25.1
> >
>
> Regards,
> Anup



-- 
Regards,
Atish
