Message-ID: <20190729115544.17895-6-anup.patel@wdc.com>
Date: Mon, 29 Jul 2019 11:56:53 +0000
From: Anup Patel <Anup.Patel@....com>
To: Palmer Dabbelt <palmer@...ive.com>,
Paul Walmsley <paul.walmsley@...ive.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Radim K <rkrcmar@...hat.com>
CC: Daniel Lezcano <daniel.lezcano@...aro.org>,
Thomas Gleixner <tglx@...utronix.de>,
Atish Patra <Atish.Patra@....com>,
Alistair Francis <Alistair.Francis@....com>,
Damien Le Moal <Damien.LeMoal@....com>,
Christoph Hellwig <hch@...radead.org>,
Anup Patel <anup@...infault.org>,
"kvm@...r.kernel.org" <kvm@...r.kernel.org>,
"linux-riscv@...ts.infradead.org" <linux-riscv@...ts.infradead.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
Anup Patel <Anup.Patel@....com>
Subject: [RFC PATCH 05/16] RISC-V: KVM: Implement VCPU interrupts and requests
handling
This patch implements VCPU interrupts and requests, which are both
asynchronous events.
The VCPU interrupts can be set/unset using the KVM_INTERRUPT ioctl from
user-space. In the future, the in-kernel IRQCHIP emulation will use the
kvm_riscv_vcpu_set_interrupt() and kvm_riscv_vcpu_unset_interrupt()
functions to set/unset VCPU interrupts.
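For illustration, a minimal user-space sketch (not part of this patch;
it assumes an already created VCPU file descriptor "vcpu_fd" and a
hypothetical helper name) that asserts or de-asserts the VCPU external
interrupt line would look like:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Assert (pending != 0) or de-assert the VCPU external interrupt line */
static int set_ext_irq(int vcpu_fd, int pending)
{
	struct kvm_interrupt irq = {
		.irq = pending ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET,
	};

	/* Handled by kvm_arch_vcpu_async_ioctl() below */
	return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}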
Important VCPU requests implemented by this patch are:
KVM_REQ_IRQ_PENDING - set whenever a VCPU interrupt is pending
KVM_REQ_SLEEP - set whenever the VCPU itself goes to the sleep state
KVM_REQ_VCPU_RESET - set whenever a VCPU reset is requested
The WFI trap-n-emulate (added later) will use the KVM_REQ_SLEEP request
and the kvm_riscv_vcpu_has_interrupt() function.
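To show the intent, a rough, hypothetical sketch of such a WFI handler
(the real handler comes in a later patch) would be:

#include <linux/kvm_host.h>

/* Hypothetical sketch: block the VCPU until an unmasked IRQ arrives */
static void wfi_trap_sketch(struct kvm_vcpu *vcpu)
{
	if (!kvm_riscv_vcpu_has_interrupt(vcpu))
		/* Woken up by kvm_vcpu_kick() or kvm_vcpu_wake_up() */
		kvm_vcpu_block(vcpu);
}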
The KVM_REQ_VCPU_RESET request will be used by the SBI emulation (added
later) to power-up a VCPU that is in the power-off state. User-space can
use the KVM_GET_MP_STATE/KVM_SET_MP_STATE ioctls to get/set the power
state of a VCPU.
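For example, a minimal user-space sketch (again assuming an existing
VCPU file descriptor "vcpu_fd") that stops a VCPU and reads the state
back would be:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int vcpu_stop(int vcpu_fd)
{
	struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_STOPPED };

	/* Routed to kvm_arch_vcpu_ioctl_set_mpstate() in the kernel */
	if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp) < 0)
		return -1;

	/* Read it back; mp.mp_state should now be KVM_MP_STATE_STOPPED */
	return ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp);
}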
Signed-off-by: Anup Patel <anup.patel@....com>
---
arch/riscv/include/asm/kvm_host.h | 13 +++
arch/riscv/include/uapi/asm/kvm.h | 3 +
arch/riscv/kvm/vcpu.c | 174 +++++++++++++++++++++++++++---
3 files changed, 177 insertions(+), 13 deletions(-)
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 244eabe62710..aa89f1922da1 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -125,6 +125,13 @@ struct kvm_vcpu_arch {
/* CPU CSR context upon Guest VCPU reset */
struct kvm_vcpu_csr guest_reset_csr;
+ /* VCPU interrupts */
+ raw_spinlock_t irqs_lock;
+ unsigned long irqs_pending;
+
+ /* VCPU power-off state */
+ bool power_off;
+
/* Don't run the VCPU (blocked) */
bool pause;
};
@@ -146,6 +153,12 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
static inline void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch) {}
+int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
+int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
+bool kvm_riscv_vcpu_has_interrupt(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
+
void kvm_riscv_halt_guest(struct kvm *kvm);
void kvm_riscv_resume_guest(struct kvm *kvm);
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index d15875818b6e..6dbc056d58ba 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -18,6 +18,9 @@
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_INTERRUPT_SET -1U
+#define KVM_INTERRUPT_UNSET -2U
+
/* for KVM_GET_REGS and KVM_SET_REGS */
struct kvm_regs {
};
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 1ae806f28c0e..c6f57caa95f0 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -42,6 +42,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
+ unsigned long f;
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
@@ -50,6 +51,10 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
memcpy(csr, reset_csr, sizeof(*csr));
memcpy(cntx, reset_cntx, sizeof(*cntx));
+
+ raw_spin_lock_irqsave(&vcpu->arch.irqs_lock, f);
+ vcpu->arch.irqs_pending = 0;
+ raw_spin_unlock_irqrestore(&vcpu->arch.irqs_lock, f);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
@@ -103,6 +108,9 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
cntx->hstatus |= HSTATUS_SP2P;
cntx->hstatus |= HSTATUS_SPV;
+ /* Setup VCPU irqs lock */
+ raw_spin_lock_init(&vcpu->arch.irqs_lock);
+
/* Setup reset state of HEDELEG and HIDELEG CSRs */
csr = &vcpu->arch.guest_reset_csr;
csr->hedeleg = 0;
@@ -131,8 +139,15 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- /* TODO: */
- return 0;
+ int ret;
+ unsigned long f, irqs;
+
+ raw_spin_lock_irqsave(&vcpu->arch.irqs_lock, f);
+ irqs = vcpu->arch.irqs_pending & vcpu->arch.guest_csr.vsie;
+ ret = (irqs & (1UL << IRQ_S_TIMER)) ? 1 : 0;
+ raw_spin_unlock_irqrestore(&vcpu->arch.irqs_lock, f);
+
+ return ret;
}
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
@@ -145,20 +160,18 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
- /* TODO: */
- return 0;
+ return (kvm_riscv_vcpu_has_interrupt(vcpu) &&
+ !vcpu->arch.power_off && !vcpu->arch.pause);
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
- /* TODO: */
- return 0;
+ return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
- /* TODO: */
- return false;
+ return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}
bool kvm_arch_has_vcpu_debugfs(void)
@@ -179,7 +192,21 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
long kvm_arch_vcpu_async_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
- /* TODO; */
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = (void __user *)arg;
+
+ if (ioctl == KVM_INTERRUPT) {
+ struct kvm_interrupt irq;
+
+ if (copy_from_user(&irq, argp, sizeof(irq)))
+ return -EFAULT;
+
+ if (irq.irq == KVM_INTERRUPT_SET)
+ return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_S_EXT);
+ else
+ return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_S_EXT);
+ }
+
return -ENOIOCTLCMD;
}
@@ -228,18 +255,113 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return -EINVAL;
}
+static void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
+{
+ unsigned long f;
+
+ raw_spin_lock_irqsave(&vcpu->arch.irqs_lock, f);
+ if (vcpu->arch.irqs_pending ^ vcpu->arch.guest_csr.vsip) {
+ csr_write(CSR_VSIP, vcpu->arch.irqs_pending);
+ vcpu->arch.guest_csr.vsip = vcpu->arch.irqs_pending;
+ }
+ raw_spin_unlock_irqrestore(&vcpu->arch.irqs_lock, f);
+}
+
+static void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.guest_csr.vsip = csr_read(CSR_VSIP);
+ vcpu->arch.guest_csr.vsie = csr_read(CSR_VSIE);
+}
+
+int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
+{
+ unsigned long f;
+
+ if (irq != IRQ_S_SOFT &&
+ irq != IRQ_S_TIMER &&
+ irq != IRQ_S_EXT)
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&vcpu->arch.irqs_lock, f);
+ vcpu->arch.irqs_pending |= (1UL << irq);
+ raw_spin_unlock_irqrestore(&vcpu->arch.irqs_lock, f);
+
+ kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
+ kvm_vcpu_kick(vcpu);
+
+ return 0;
+}
+
+int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
+{
+ unsigned long f;
+
+ if (irq != IRQ_S_SOFT &&
+ irq != IRQ_S_TIMER &&
+ irq != IRQ_S_EXT)
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&vcpu->arch.irqs_lock, f);
+ vcpu->arch.irqs_pending &= ~(1UL << irq);
+ raw_spin_unlock_irqrestore(&vcpu->arch.irqs_lock, f);
+
+ return 0;
+}
+
+bool kvm_riscv_vcpu_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ bool ret = false;
+ unsigned long f;
+
+ raw_spin_lock_irqsave(&vcpu->arch.irqs_lock, f);
+ if (vcpu->arch.irqs_pending & vcpu->arch.guest_csr.vsie)
+ ret = true;
+ raw_spin_unlock_irqrestore(&vcpu->arch.irqs_lock, f);
+
+ return ret;
+}
+
+void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.power_off = true;
+ kvm_make_request(KVM_REQ_SLEEP, vcpu);
+ kvm_vcpu_kick(vcpu);
+}
+
+void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.power_off = false;
+ kvm_vcpu_wake_up(vcpu);
+}
+
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- /* TODO: */
+ if (vcpu->arch.power_off)
+ mp_state->mp_state = KVM_MP_STATE_STOPPED;
+ else
+ mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
+
return 0;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
- /* TODO: */
- return 0;
+ int ret = 0;
+
+ switch (mp_state->mp_state) {
+ case KVM_MP_STATE_RUNNABLE:
+ vcpu->arch.power_off = false;
+ break;
+ case KVM_MP_STATE_STOPPED:
+ kvm_riscv_vcpu_power_off(vcpu);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
@@ -263,8 +385,25 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
+ struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
+
if (kvm_request_pending(vcpu)) {
- /* TODO: */
+ if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
+ swait_event_interruptible_exclusive(*wq,
+ ((!vcpu->arch.power_off) &&
+ (!vcpu->arch.pause)));
+
+ if (vcpu->arch.power_off || vcpu->arch.pause) {
+ /*
+ * Awaken to handle a signal, request to
+ * sleep again later.
+ */
+ kvm_make_request(KVM_REQ_SLEEP, vcpu);
+ }
+ }
+
+ if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
+ kvm_riscv_reset_vcpu(vcpu);
/*
* Clear IRQ_PENDING requests that were made to guarantee
@@ -317,6 +456,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
run->exit_reason = KVM_EXIT_INTR;
}
+ /*
+ * We might have got VCPU interrupts updated asynchronously
+ * so update it in HW.
+ */
+ kvm_riscv_vcpu_flush_interrupts(vcpu);
+
/*
* Ensure we set mode to IN_GUEST_MODE after we disable
* interrupts and before the final VCPU requests check.
@@ -347,6 +492,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
scause = csr_read(CSR_SCAUSE);
stval = csr_read(CSR_STVAL);
+ /* Syncup interrupts state with HW */
+ kvm_riscv_vcpu_sync_interrupts(vcpu);
+
/*
* We may have taken a host interrupt in VS/VU-mode (i.e.
* while executing the guest). This interrupt is still
--
2.17.1