Message-ID: <20260109034532.1012993-9-seanjc@google.com>
Date: Thu, 8 Jan 2026 19:45:32 -0800
From: Sean Christopherson <seanjc@...gle.com>
To: Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
Chao Gao <chao.gao@...el.com>
Subject: [PATCH v4 8/8] KVM: x86: Update APICv ISR (a.k.a. SVI) as part of kvm_apic_update_apicv()
Fold the calls to .hwapic_isr_update() in kvm_apic_set_state(),
kvm_lapic_reset(), and __kvm_vcpu_update_apicv() into
kvm_apic_update_apicv(), as updating SVI is directly related to updating
KVM's own cache of ISR information, e.g. SVI is more or less the APICv
equivalent of highest_isr_cache.
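For reference, a sketch of the consolidated helper after this patch
(abridged from the lapic.c hunk below; unrelated code elided):

  void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
  {
          struct kvm_lapic *apic = vcpu->arch.apic;
          ...
          apic->irr_pending = true;

          /*
           * Keep SVI in sync with the highest bit set in the vISR; with
           * APICv active, the CPU consults SVI to determine which vISR
           * vector to clear on the next accelerated EOI.
           */
          if (apic->apicv_active) {
                  apic->isr_count = 1;
                  kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
          } else {
                  apic->isr_count = count_vectors(apic->regs + APIC_ISR);
          }
          apic->highest_isr_cache = -1;
  }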
Note, calling .hwapic_isr_update() during kvm_apic_update_apicv() has
benign side effects, as doing so changes the order of the calls in
kvm_lapic_reset() and kvm_apic_set_state(), specifically with respect
to the ordering of .hwapic_isr_update() versus .apicv_post_state_restore().
However, the changes in ordering are glorified nops as the former hook is
VMX-only and the latter is SVM-only.
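Concretely, the restore path in kvm_apic_set_state() ends up ordered as
sketched below (abridged from the hunk further down), with the SVI update
now happening via kvm_apic_update_apicv() before, rather than after,
.apicv_post_state_restore():

        kvm_apic_update_apicv(vcpu);    /* also refreshes SVI (VMX-only hook) */
        if (apic->apicv_active)
                kvm_x86_call(apicv_post_state_restore)(vcpu);   /* SVM-only hook */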
Reviewed-by: Chao Gao <chao.gao@...el.com>
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
arch/x86/kvm/lapic.c | 31 ++++++++++++-------------------
arch/x86/kvm/lapic.h | 1 -
arch/x86/kvm/x86.c | 7 -------
3 files changed, 12 insertions(+), 27 deletions(-)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 1597dd0b0cc6..9b791e728ec1 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -770,17 +770,6 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
}
}
-void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu)
-{
- struct kvm_lapic *apic = vcpu->arch.apic;
-
- if (WARN_ON_ONCE(!lapic_in_kernel(vcpu)) || !apic->apicv_active)
- return;
-
- kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
-}
-EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_update_hwapic_isr);
-
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
/* This may race with setting of irr in __apic_accept_irq() and
@@ -2785,10 +2774,18 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
*/
apic->irr_pending = true;
- if (apic->apicv_active)
+ /*
+ * Update SVI when APICv gets enabled, otherwise SVI won't reflect the
+ * highest bit in vISR and the next accelerated EOI in the guest won't
+ * be virtualized correctly (the CPU uses SVI to determine which vISR
+ * vector to clear).
+ */
+ if (apic->apicv_active) {
apic->isr_count = 1;
- else
+ kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
+ } else {
apic->isr_count = count_vectors(apic->regs + APIC_ISR);
+ }
apic->highest_isr_cache = -1;
}
@@ -2916,10 +2913,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu->arch.pv_eoi.msr_val = 0;
apic_update_ppr(apic);
- if (apic->apicv_active) {
+ if (apic->apicv_active)
kvm_x86_call(apicv_post_state_restore)(vcpu);
- kvm_x86_call(hwapic_isr_update)(vcpu, -1);
- }
vcpu->arch.apic_arb_prio = 0;
vcpu->arch.apic_attention = 0;
@@ -3232,10 +3227,8 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
__start_apic_timer(apic, APIC_TMCCT);
kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
kvm_apic_update_apicv(vcpu);
- if (apic->apicv_active) {
+ if (apic->apicv_active)
kvm_x86_call(apicv_post_state_restore)(vcpu);
- kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
- }
kvm_make_request(KVM_REQ_EVENT, vcpu);
#ifdef CONFIG_KVM_IOAPIC
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 282b9b7da98c..aa0a9b55dbb7 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -126,7 +126,6 @@ void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high);
int kvm_apic_set_base(struct kvm_vcpu *vcpu, u64 value, bool host_initiated);
int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
-void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu);
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ff8812f3a129..0c6d899d53dd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10886,16 +10886,9 @@ void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
* pending. At the same time, KVM_REQ_EVENT may not be set as APICv was
* still active when the interrupt got accepted. Make sure
* kvm_check_and_inject_events() is called to check for that.
- *
- * Update SVI when APICv gets enabled, otherwise SVI won't reflect the
- * highest bit in vISR and the next accelerated EOI in the guest won't
- * be virtualized correctly (the CPU uses SVI to determine which vISR
- * vector to clear).
*/
if (!apic->apicv_active)
kvm_make_request(KVM_REQ_EVENT, vcpu);
- else
- kvm_apic_update_hwapic_isr(vcpu);
out:
preempt_enable();
--
2.52.0.457.g6b5491de43-goog