Message-ID: <CAAhSdy3ij--wR+=7gFQ03PFCiAA5OFBJfayU=Z7ODAwbP+pBaw@mail.gmail.com>
Date: Mon, 23 Sep 2019 09:12:44 +0530
From: Anup Patel <anup@...infault.org>
To: Anup Patel <Anup.Patel@....com>, Alexander Graf <graf@...zon.com>
Cc: Palmer Dabbelt <palmer@...ive.com>,
Paul Walmsley <paul.walmsley@...ive.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Radim K <rkrcmar@...hat.com>,
Daniel Lezcano <daniel.lezcano@...aro.org>,
Thomas Gleixner <tglx@...utronix.de>,
Atish Patra <Atish.Patra@....com>,
Alistair Francis <Alistair.Francis@....com>,
Damien Le Moal <Damien.LeMoal@....com>,
Christoph Hellwig <hch@...radead.org>,
"kvm@...r.kernel.org" <kvm@...r.kernel.org>,
"linux-riscv@...ts.infradead.org" <linux-riscv@...ts.infradead.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v7 08/21] RISC-V: KVM: Implement KVM_GET_ONE_REG/KVM_SET_ONE_REG
ioctls
On Wed, Sep 4, 2019 at 9:44 PM Anup Patel <Anup.Patel@....com> wrote:
>
> For KVM RISC-V, we use KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls to access
> VCPU config and registers from user-space.
>
> We have three types of VCPU registers:
> 1. CONFIG - these are VCPU config and capabilities
> 2. CORE - these are VCPU general purpose registers
> 3. CSR - these are VCPU control and status registers
>
> The CONFIG registers available to user-space are ISA and TIMEBASE. Out
> of these, TIMEBASE is a read-only register which informs user-space about
> the VCPU timer base frequency. The ISA register is a read-write register
> to which user-space can write the desired VCPU ISA capabilities only
> before running the VCPU.
>
> The CORE registers available to user-space are PC, RA, SP, GP, TP, A0-A7,
> T0-T6, S0-S11 and MODE. Most of these are RISC-V general registers, except
> PC and MODE. The PC register represents the program counter whereas the MODE
> register represents the VCPU privilege mode (i.e. S/U-mode).
>
> The CSRs available to user-space are SSTATUS, SIE, STVEC, SSCRATCH, SEPC,
> SCAUSE, STVAL, SIP, and SATP. All of these are read/write registers.
>
> In the future, more VCPU register types (such as FP) will be added for the
> KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls.
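
For reference, here is a minimal user-space sketch of how these registers
would be accessed through the ONE_REG interface. This is only illustrative:
riscv_vcpu_get_isa() is a hypothetical helper, it assumes RV64 (so the
register size is U64), a vcpu fd obtained via KVM_CREATE_VCPU, and
KVM_REG_RISCV being the architecture identifier added by this series to
<linux/kvm.h>.

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the ISA CONFIG register of a vcpu; returns 0 on success. */
static int riscv_vcpu_get_isa(int vcpu_fd, uint64_t *val)
{
        struct kvm_one_reg reg = {
                /* arch | size | type | index, per the encoding in this patch */
                .id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                      KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
                .addr = (uint64_t)(unsigned long)val,
        };

        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

CORE and CSR registers follow the same pattern, using KVM_REG_RISCV_CORE /
KVM_REG_RISCV_CSR and the corresponding *_REG() index macro.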
>
> Signed-off-by: Anup Patel <anup.patel@....com>
> Acked-by: Paolo Bonzini <pbonzini@...hat.com>
> Reviewed-by: Paolo Bonzini <pbonzini@...hat.com>
> ---
> arch/riscv/include/uapi/asm/kvm.h | 46 +++++-
> arch/riscv/kvm/vcpu.c | 235 +++++++++++++++++++++++++++++-
> 2 files changed, 278 insertions(+), 3 deletions(-)
>
> diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
> index 6dbc056d58ba..08c4515ad71b 100644
> --- a/arch/riscv/include/uapi/asm/kvm.h
> +++ b/arch/riscv/include/uapi/asm/kvm.h
> @@ -23,8 +23,15 @@
>
> /* for KVM_GET_REGS and KVM_SET_REGS */
> struct kvm_regs {
> + /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
> + struct user_regs_struct regs;
> + unsigned long mode;
> };
As discussed at LPC 2019 with Alex Graf, I will add a separate
struct for CORE registers instead of re-using "struct kvm_regs".
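
A rough sketch of the direction (hypothetical struct name, same fields as
in the hunk above, not the final patch):

struct kvm_riscv_core {
        struct user_regs_struct regs;   /* pc and the GPRs, as in the hunk above */
        unsigned long mode;             /* KVM_RISCV_MODE_S or KVM_RISCV_MODE_U */
};

KVM_REG_RISCV_CORE_REG() would then index into this dedicated struct
instead of "struct kvm_regs", which can stay empty:

#define KVM_REG_RISCV_CORE_REG(name) \
        (offsetof(struct kvm_riscv_core, name) / sizeof(unsigned long))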
>
> +/* Possible privilege modes for kvm_regs */
> +#define KVM_RISCV_MODE_S 1
> +#define KVM_RISCV_MODE_U 0
> +
> /* for KVM_GET_FPU and KVM_SET_FPU */
> struct kvm_fpu {
> };
> @@ -41,10 +48,47 @@ struct kvm_guest_debug_arch {
> struct kvm_sync_regs {
> };
>
> -/* dummy definition */
> +/* for KVM_GET_SREGS and KVM_SET_SREGS */
> struct kvm_sregs {
> + unsigned long sstatus;
> + unsigned long sie;
> + unsigned long stvec;
> + unsigned long sscratch;
> + unsigned long sepc;
> + unsigned long scause;
> + unsigned long stval;
> + unsigned long sip;
> + unsigned long satp;
> +};
Same as above: I will add a separate struct for CSR registers instead
of re-using "struct kvm_sregs".
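
Likewise, something along these lines (hypothetical name, same CSR list
as in the hunk above):

struct kvm_riscv_csr {
        unsigned long sstatus;
        unsigned long sie;
        unsigned long stvec;
        unsigned long sscratch;
        unsigned long sepc;
        unsigned long scause;
        unsigned long stval;
        unsigned long sip;
        unsigned long satp;
};

with KVM_REG_RISCV_CSR_REG(name) redefined as
offsetof(struct kvm_riscv_csr, name) / sizeof(unsigned long), and the
sizeof(struct kvm_sregs) bound checks in vcpu.c updated to match.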
> +
> +/* for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
> +struct kvm_riscv_config {
> + unsigned long isa;
> + unsigned long tbfreq;
> };
>
> +#define KVM_REG_SIZE(id) \
> + (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
> +
> +/* If you need to interpret the index values, here is the key: */
> +#define KVM_REG_RISCV_TYPE_MASK 0x00000000FF000000
> +#define KVM_REG_RISCV_TYPE_SHIFT 24
> +
> +/* Config registers are mapped as type 1 */
> +#define KVM_REG_RISCV_CONFIG (0x01 << KVM_REG_RISCV_TYPE_SHIFT)
> +#define KVM_REG_RISCV_CONFIG_REG(name) \
> + (offsetof(struct kvm_riscv_config, name) / sizeof(unsigned long))
> +
> +/* Core registers are mapped as type 2 */
> +#define KVM_REG_RISCV_CORE (0x02 << KVM_REG_RISCV_TYPE_SHIFT)
> +#define KVM_REG_RISCV_CORE_REG(name) \
> + (offsetof(struct kvm_regs, name) / sizeof(unsigned long))
> +
> +/* Control and status registers are mapped as type 3 */
> +#define KVM_REG_RISCV_CSR (0x03 << KVM_REG_RISCV_TYPE_SHIFT)
> +#define KVM_REG_RISCV_CSR_REG(name) \
> + (offsetof(struct kvm_sregs, name) / sizeof(unsigned long))
> +
> #endif
>
> #endif /* __LINUX_KVM_RISCV_H */
> diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
> index 3223f723f79e..b95dfc959009 100644
> --- a/arch/riscv/kvm/vcpu.c
> +++ b/arch/riscv/kvm/vcpu.c
> @@ -165,6 +165,215 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
> return VM_FAULT_SIGBUS;
> }
>
> +static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
> + const struct kvm_one_reg *reg)
> +{
> + unsigned long __user *uaddr =
> + (unsigned long __user *)(unsigned long)reg->addr;
> + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
> + KVM_REG_SIZE_MASK |
> + KVM_REG_RISCV_CONFIG);
> + unsigned long reg_val;
> +
> + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
> + return -EINVAL;
> +
> + switch (reg_num) {
> + case KVM_REG_RISCV_CONFIG_REG(isa):
> + reg_val = vcpu->arch.isa;
> + break;
> + case KVM_REG_RISCV_CONFIG_REG(tbfreq):
> + reg_val = riscv_timebase;
> + break;
> + default:
> + return -EINVAL;
> + };
> +
> + if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
> + return -EFAULT;
> +
> + return 0;
> +}
> +
> +static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
> + const struct kvm_one_reg *reg)
> +{
> + unsigned long __user *uaddr =
> + (unsigned long __user *)(unsigned long)reg->addr;
> + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
> + KVM_REG_SIZE_MASK |
> + KVM_REG_RISCV_CONFIG);
> + unsigned long reg_val;
> +
> + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
> + return -EINVAL;
> +
> + if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
> + return -EFAULT;
> +
> + switch (reg_num) {
> + case KVM_REG_RISCV_CONFIG_REG(isa):
> + if (!vcpu->arch.ran_atleast_once) {
> + vcpu->arch.isa = reg_val;
> + vcpu->arch.isa &= riscv_isa_extension_base(NULL);
> + vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
> + } else {
> + return -ENOTSUPP;
> + }
> + break;
> + case KVM_REG_RISCV_CONFIG_REG(tbfreq):
> + return -ENOTSUPP;
> + default:
> + return -EINVAL;
> + };
> +
> + return 0;
> +}
> +
> +static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
> + const struct kvm_one_reg *reg)
> +{
> + struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
> + unsigned long __user *uaddr =
> + (unsigned long __user *)(unsigned long)reg->addr;
> + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
> + KVM_REG_SIZE_MASK |
> + KVM_REG_RISCV_CORE);
> + unsigned long reg_val;
> +
> + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
> + return -EINVAL;
> +
> + if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
> + reg_val = cntx->sepc;
> + else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
> + reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
> + reg_val = ((unsigned long *)cntx)[reg_num];
> + else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
> + reg_val = (cntx->sstatus & SR_SPP) ?
> + KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
> + else
> + return -EINVAL;
> +
> + if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
> + return -EFAULT;
> +
> + return 0;
> +}
> +
> +static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
> + const struct kvm_one_reg *reg)
> +{
> + struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
> + unsigned long __user *uaddr =
> + (unsigned long __user *)(unsigned long)reg->addr;
> + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
> + KVM_REG_SIZE_MASK |
> + KVM_REG_RISCV_CORE);
> + unsigned long reg_val;
> +
> + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
> + return -EINVAL;
> +
> + if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
> + return -EFAULT;
> +
> + if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
> + cntx->sepc = reg_val;
> + else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
> + reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
> + ((unsigned long *)cntx)[reg_num] = reg_val;
> + else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
> + if (reg_val == KVM_RISCV_MODE_S)
> + cntx->sstatus |= SR_SPP;
> + else
> + cntx->sstatus &= ~SR_SPP;
> + } else
> + return -EINVAL;
> +
> + return 0;
> +}
> +
> +static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
> + const struct kvm_one_reg *reg)
> +{
> + struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
> + unsigned long __user *uaddr =
> + (unsigned long __user *)(unsigned long)reg->addr;
> + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
> + KVM_REG_SIZE_MASK |
> + KVM_REG_RISCV_CSR);
> + unsigned long reg_val;
> +
> + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
> + return -EINVAL;
> + if (reg_num >= sizeof(struct kvm_sregs) / sizeof(unsigned long))
> + return -EINVAL;
> +
> + if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
> + kvm_riscv_vcpu_flush_interrupts(vcpu);
> +
> + reg_val = ((unsigned long *)csr)[reg_num];
> +
> + if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
> + return -EFAULT;
> +
> + return 0;
> +}
> +
> +static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
> + const struct kvm_one_reg *reg)
> +{
> + struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
> + unsigned long __user *uaddr =
> + (unsigned long __user *)(unsigned long)reg->addr;
> + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
> + KVM_REG_SIZE_MASK |
> + KVM_REG_RISCV_CSR);
> + unsigned long reg_val;
> +
> + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
> + return -EINVAL;
> + if (reg_num >= sizeof(struct kvm_sregs) / sizeof(unsigned long))
> + return -EINVAL;
> +
> + if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
> + return -EFAULT;
> +
> + ((unsigned long *)csr)[reg_num] = reg_val;
> +
> + if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
> + WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
> +
> + return 0;
> +}
> +
> +static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
> + const struct kvm_one_reg *reg)
> +{
> + if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
> + return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
> + else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
> + return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
> + else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
> + return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
> +
> + return -EINVAL;
> +}
> +
> +static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
> + const struct kvm_one_reg *reg)
> +{
> + if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
> + return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
> + else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
> + return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
> + else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
> + return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
> +
> + return -EINVAL;
> +}
> +
> long kvm_arch_vcpu_async_ioctl(struct file *filp,
> unsigned int ioctl, unsigned long arg)
> {
> @@ -189,8 +398,30 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
> long kvm_arch_vcpu_ioctl(struct file *filp,
> unsigned int ioctl, unsigned long arg)
> {
> - /* TODO: */
> - return -EINVAL;
> + struct kvm_vcpu *vcpu = filp->private_data;
> + void __user *argp = (void __user *)arg;
> + long r = -EINVAL;
> +
> + switch (ioctl) {
> + case KVM_SET_ONE_REG:
> + case KVM_GET_ONE_REG: {
> + struct kvm_one_reg reg;
> +
> + r = -EFAULT;
> + if (copy_from_user(&reg, argp, sizeof(reg)))
> + break;
> +
> + if (ioctl == KVM_SET_ONE_REG)
> + r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
> + else
> + r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
> + break;
> + }
> + default:
> + break;
> + }
> +
> + return r;
> }
>
> int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
> --
> 2.17.1
>
Regards,
Anup