[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250401161106.790710-26-pbonzini@redhat.com>
Date: Tue, 1 Apr 2025 18:11:02 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: linux-kernel@...r.kernel.org,
kvm@...r.kernel.org
Cc: roy.hopkins@...e.com,
seanjc@...gle.com,
thomas.lendacky@....com,
ashish.kalra@....com,
michael.roth@....com,
jroedel@...e.de,
nsaenz@...zon.com,
anelkz@...zon.de,
James.Bottomley@...senPartnership.com
Subject: [PATCH 25/29] KVM: x86: handle interrupt priorities for planes
Force a userspace exit if an interrupt is delivered to a higher-priority
plane, where priority is represented by vcpu->run->req_exit_planes.
The set of planes with a pending IRR is manipulated atomically and stored
in the plane-0 vCPU, since it is easy to reach from the target vCPU.
TODO: haven't put much thought into IPI virtualization.
Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
---
arch/x86/include/asm/kvm_host.h | 7 +++++
arch/x86/kvm/lapic.c | 36 +++++++++++++++++++++++--
arch/x86/kvm/x86.c | 48 +++++++++++++++++++++++++++++++++
include/linux/kvm_host.h | 2 ++
4 files changed, 91 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9ac39f128a53..0344e8bed319 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -125,6 +125,7 @@
#define KVM_REQ_HV_TLB_FLUSH \
KVM_ARCH_REQ_FLAGS(32, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UPDATE_PROTECTED_GUEST_STATE KVM_ARCH_REQ(34)
+#define KVM_REQ_PLANE_INTERRUPT KVM_ARCH_REQ(35)
#define CR0_RESERVED_BITS \
(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
@@ -864,6 +865,12 @@ struct kvm_vcpu_arch {
u64 xcr0;
u64 guest_supported_xcr0;
+ /*
+ * Only valid in plane0. The bitmask of planes that received
+ * an interrupt, to be checked against req_exit_planes.
+ */
+ atomic_t irr_pending_planes;
+
struct kvm_pio_request pio;
void *pio_data;
void *sev_pio_data;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 16a0e2387f2c..21dbc539cbe7 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1311,6 +1311,39 @@ bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
return ret;
}
+static void kvm_lapic_deliver_interrupt(struct kvm_vcpu *vcpu, struct kvm_lapic *apic,
+ int delivery_mode, int trig_mode, int vector)
+{
+ struct kvm_vcpu *plane0_vcpu = vcpu->plane0;
+ struct kvm_plane *running_plane;
+ u16 req_exit_planes;
+
+ kvm_x86_call(deliver_interrupt)(apic, delivery_mode, trig_mode, vector);
+
+ /*
+ * test_and_set_bit implies a memory barrier, so IRR is written before
+ * reading irr_pending_planes below...
+ */
+ if (!test_and_set_bit(vcpu->plane, &plane0_vcpu->arch.irr_pending_planes)) {
+ /*
+ * ... and also running_plane and req_exit_planes are read after writing
+ * irr_pending_planes. Both barriers pair with kvm_arch_vcpu_ioctl_run().
+ */
+ smp_mb__after_atomic();
+
+ running_plane = READ_ONCE(plane0_vcpu->running_plane);
+ if (!running_plane)
+ return;
+
+ req_exit_planes = READ_ONCE(plane0_vcpu->req_exit_planes);
+ if (!(req_exit_planes & BIT(vcpu->plane)))
+ return;
+
+ kvm_make_request(KVM_REQ_PLANE_INTERRUPT,
+ kvm_get_plane_vcpu(running_plane, vcpu->vcpu_id));
+ }
+}
+
/*
* Add a pending IRQ into lapic.
* Return 1 if successfully added and 0 if discarded.
@@ -1352,8 +1385,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
apic->regs + APIC_TMR);
}
- kvm_x86_call(deliver_interrupt)(apic, delivery_mode,
- trig_mode, vector);
+ kvm_lapic_deliver_interrupt(vcpu, apic, delivery_mode, trig_mode, vector);
break;
case APIC_DM_REMRD:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index be4d7b97367b..4546d1049f43 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10960,6 +10960,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
goto out;
}
}
+ if (kvm_check_request(KVM_REQ_PLANE_INTERRUPT, vcpu)) {
+ u16 irr_pending_planes = atomic_read(&vcpu->plane0->arch.irr_pending_planes);
+ u16 target = irr_pending_planes & vcpu->plane0->req_exit_planes;
+ if (target) {
+ vcpu->run->exit_reason = KVM_EXIT_PLANE_EVENT;
+ vcpu->run->plane_event.cause = KVM_PLANE_EVENT_INTERRUPT;
+ vcpu->run->plane_event.flags = 0;
+ vcpu->run->plane_event.pending_event_planes = irr_pending_planes;
+ vcpu->run->plane_event.target = target;
+ r = 0;
+ goto out;
+ }
+ }
}
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win ||
@@ -11689,8 +11702,11 @@ static int kvm_vcpu_ioctl_run_plane(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
+ struct kvm_vcpu *plane0_vcpu = vcpu;
int plane_id = READ_ONCE(vcpu->run->plane);
struct kvm_plane *plane = vcpu->kvm->planes[plane_id];
+ u16 req_exit_planes = READ_ONCE(vcpu->run->req_exit_planes) & ~BIT(plane_id);
+ u16 irr_pending_planes;
int r;
if (plane_id) {
@@ -11698,8 +11714,40 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
mutex_lock_nested(&vcpu->mutex, 1);
}
+ if (plane0_vcpu->has_planes) {
+ plane0_vcpu->req_exit_planes = req_exit_planes;
+ plane0_vcpu->running_plane = plane;
+
+ /*
+ * Check for cross-plane interrupts that happened while outside KVM_RUN;
+ * write running_plane and req_exit_planes before reading irr_pending_planes.
+ * If an interrupt hasn't set irr_pending_planes yet, it will trigger
+ * KVM_REQ_PLANE_INTERRUPT itself in kvm_lapic_deliver_interrupt().
+ */
+ smp_mb__before_atomic();
+
+ irr_pending_planes = atomic_fetch_and(~BIT(plane_id), &plane0_vcpu->arch.irr_pending_planes);
+ if (req_exit_planes & irr_pending_planes)
+ kvm_make_request(KVM_REQ_PLANE_INTERRUPT, vcpu);
+ }
+
r = kvm_vcpu_ioctl_run_plane(vcpu);
+ if (plane0_vcpu->has_planes) {
+ smp_store_release(&plane0_vcpu->running_plane, NULL);
+
+ /*
+ * Clear irr_pending_planes before reading IRR; pairs with
+ * kvm_lapic_deliver_interrupt(). If this side doesn't see IRR set,
+ * the other side will certainly see the cleared bit in irr_pending_planes
+ * and set it, and vice versa.
+ */
+ clear_bit(plane_id, &plane0_vcpu->arch.irr_pending_planes);
+ smp_mb__after_atomic();
+ if (kvm_lapic_find_highest_irr(vcpu))
+ atomic_or(BIT(plane_id), &plane0_vcpu->arch.irr_pending_planes);
+ }
+
if (plane_id)
mutex_unlock(&vcpu->mutex);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0b764951f461..442aed2b9cc6 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -346,6 +346,8 @@ struct kvm_vcpu {
/* Only valid on plane 0 */
bool has_planes;
bool wants_to_run;
+ u16 req_exit_planes;
+ struct kvm_plane *running_plane;
/* Shared for all planes */
struct kvm_run *run;
--
2.49.0
Powered by blists - more mailing lists