Message-ID: <352949ba-e4b8-477b-ad48-eb5e8aa57d86@redhat.com>
Date: Wed, 9 Apr 2025 12:34:38 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: Sean Christopherson <seanjc@...gle.com>
Cc: Joerg Roedel <joro@...tes.org>, David Woodhouse <dwmw2@...radead.org>,
Lu Baolu <baolu.lu@...ux.intel.com>, kvm@...r.kernel.org,
iommu@...ts.linux.dev, linux-kernel@...r.kernel.org,
Maxim Levitsky <mlevitsk@...hat.com>,
Joao Martins <joao.m.martins@...cle.com>, David Matlack <dmatlack@...gle.com>
Subject: Re: [PATCH 65/67] KVM: SVM: Generate GA log IRQs only if the
associated vCPU is blocking
On 4/8/25 23:31, Sean Christopherson wrote:
> On Tue, Apr 08, 2025, Paolo Bonzini wrote:
>> On 4/4/25 21:39, Sean Christopherson wrote:
>>> @@ -892,7 +893,7 @@ static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu, bool toggle_avic)
>>> WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
>>> - avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, toggle_avic);
>>> + avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, toggle_avic, false);
>>> spin_unlock_irqrestore(&svm->ir_list_lock, flags);
>>> }
>>> @@ -912,7 +913,8 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>>> __avic_vcpu_load(vcpu, cpu, false);
>>> }
>>> -static void __avic_vcpu_put(struct kvm_vcpu *vcpu, bool toggle_avic)
>>> +static void __avic_vcpu_put(struct kvm_vcpu *vcpu, bool toggle_avic,
>>> + bool is_blocking)
>>
>> What would it look like to use an enum { SCHED_OUT, SCHED_IN, ENABLE_AVIC,
>> DISABLE_AVIC, START_BLOCKING } for both __avic_vcpu_put and
>> __avic_vcpu_load's second argument?
>
> There's gotta be a way to make it look better than this code. I made a half-
> hearted attempt at using an enum before posting, but wasn't able to come up with
> anything decent.
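FWIW, what I had in mind was just a plain enum passed to both helpers,
roughly like the below (purely illustrative; the type and parameter
names are made up, only the enumerators are from my suggestion above):

  enum avic_vcpu_state {
          SCHED_OUT,
          SCHED_IN,
          ENABLE_AVIC,
          DISABLE_AVIC,
          START_BLOCKING,
  };

  static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu,
                               enum avic_vcpu_state state);
  static void __avic_vcpu_put(struct kvm_vcpu *vcpu,
                              enum avic_vcpu_state state);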
>
> Coming back to it with fresh eyes, what about this (full on-top diff below)?
>
> enum avic_vcpu_action {
> AVIC_SCHED_IN = 0,
> AVIC_SCHED_OUT = 0,
> AVIC_START_BLOCKING = BIT(0),
>
> AVIC_TOGGLE_ON_OFF = BIT(1),
> AVIC_ACTIVATE = AVIC_TOGGLE_ON_OFF,
> AVIC_DEACTIVATE = AVIC_TOGGLE_ON_OFF,
> };
>
> AVIC_SCHED_IN and AVIC_SCHED_OUT are essentially syntactic sugar, as are
> AVIC_ACTIVATE and AVIC_DEACTIVATE to a certain extent. But it's much better than
> booleans, and using a bitmask makes avic_update_iommu_vcpu_affinity() slightly
> prettier.
Even just the bitmask makes it clear what is "true" and what is "false"
(which is obvious in hindsight, but I had never thought about it that
way; you never stop learning).
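For example, comparing the call sites from your diff below (the enum
names are yours, nothing new):

  /* consecutive bools: which caller is "put because blocking"? */
  __avic_vcpu_put(vcpu, false, true);
  __avic_vcpu_put(vcpu, true, true);

  /* vs. the enum, where the intent is spelled out at the call site */
  __avic_vcpu_put(vcpu, AVIC_START_BLOCKING);
  __avic_vcpu_put(vcpu, AVIC_DEACTIVATE);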
You decide whether you prefer the syntactic sugar or not.
Paolo
>> Consecutive bools are ugly...
>
> Yeah, I hated it when I wrote it, and still hate it now.
>
> And more error prone, e.g. the __avic_vcpu_put() call from avic_refresh_apicv_exec_ctrl()
> should specify is_blocking=false, not true, as kvm_x86_ops.refresh_apicv_exec_ctrl()
> should never be called while the vCPU is blocking.
>
> ---
> arch/x86/kvm/svm/avic.c | 41 ++++++++++++++++++++++++++++-------------
> 1 file changed, 28 insertions(+), 13 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
> index 425674e1a04c..1752420c68aa 100644
> --- a/arch/x86/kvm/svm/avic.c
> +++ b/arch/x86/kvm/svm/avic.c
> @@ -833,9 +833,20 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
> return irq_set_vcpu_affinity(host_irq, NULL);
> }
>
> +enum avic_vcpu_action {
> + AVIC_SCHED_IN = 0,
> + AVIC_SCHED_OUT = 0,
> + AVIC_START_BLOCKING = BIT(0),
> +
> + AVIC_TOGGLE_ON_OFF = BIT(1),
> + AVIC_ACTIVATE = AVIC_TOGGLE_ON_OFF,
> + AVIC_DEACTIVATE = AVIC_TOGGLE_ON_OFF,
> +};
> +
> static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu,
> - bool toggle_avic, bool ga_log_intr)
> + enum avic_vcpu_action action)
> {
> + bool ga_log_intr = (action & AVIC_START_BLOCKING);
> struct amd_svm_iommu_ir *ir;
> struct vcpu_svm *svm = to_svm(vcpu);
>
> @@ -849,7 +860,7 @@ static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu,
> return;
>
> list_for_each_entry(ir, &svm->ir_list, node) {
> - if (!toggle_avic)
> + if (!(action & AVIC_TOGGLE_ON_OFF))
> WARN_ON_ONCE(amd_iommu_update_ga(ir->data, cpu, ga_log_intr));
> else if (cpu >= 0)
> WARN_ON_ONCE(amd_iommu_activate_guest_mode(ir->data, cpu, ga_log_intr));
> @@ -858,7 +869,8 @@ static void avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu,
> }
> }
>
> -static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu, bool toggle_avic)
> +static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu,
> + enum avic_vcpu_action action)
> {
> struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
> int h_physical_id = kvm_cpu_get_apicid(cpu);
> @@ -904,7 +916,7 @@ static void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu, bool toggle_avic)
>
> WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
>
> - avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, toggle_avic, false);
> + avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, action);
>
> spin_unlock_irqrestore(&svm->ir_list_lock, flags);
> }
> @@ -921,11 +933,10 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> if (kvm_vcpu_is_blocking(vcpu))
> return;
>
> - __avic_vcpu_load(vcpu, cpu, false);
> + __avic_vcpu_load(vcpu, cpu, AVIC_SCHED_IN);
> }
>
> -static void __avic_vcpu_put(struct kvm_vcpu *vcpu, bool toggle_avic,
> - bool is_blocking)
> +static void __avic_vcpu_put(struct kvm_vcpu *vcpu, enum avic_vcpu_action action)
> {
> struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
> struct vcpu_svm *svm = to_svm(vcpu);
> @@ -947,7 +958,7 @@ static void __avic_vcpu_put(struct kvm_vcpu *vcpu, bool toggle_avic,
> */
> spin_lock_irqsave(&svm->ir_list_lock, flags);
>
> - avic_update_iommu_vcpu_affinity(vcpu, -1, toggle_avic, is_blocking);
> + avic_update_iommu_vcpu_affinity(vcpu, -1, action);
>
> WARN_ON_ONCE(entry & AVIC_PHYSICAL_ID_ENTRY_GA_LOG_INTR);
>
> @@ -964,7 +975,7 @@ static void __avic_vcpu_put(struct kvm_vcpu *vcpu, bool toggle_avic,
> * Note! Don't set AVIC_PHYSICAL_ID_ENTRY_GA_LOG_INTR in the table as
> * it's a synthetic flag that usurps an unused, should-be-zero bit.
> */
> - if (is_blocking)
> + if (action & AVIC_START_BLOCKING)
> entry |= AVIC_PHYSICAL_ID_ENTRY_GA_LOG_INTR;
>
> svm->avic_physical_id_entry = entry;
> @@ -992,7 +1003,8 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
> return;
> }
>
> - __avic_vcpu_put(vcpu, false, kvm_vcpu_is_blocking(vcpu));
> + __avic_vcpu_put(vcpu, kvm_vcpu_is_blocking(vcpu) ? AVIC_START_BLOCKING :
> + AVIC_SCHED_OUT);
> }
>
> void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
> @@ -1024,12 +1036,15 @@ void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
> if (!enable_apicv)
> return;
>
> + /* APICv should only be toggled on/off while the vCPU is running. */
> + WARN_ON_ONCE(kvm_vcpu_is_blocking(vcpu));
> +
> avic_refresh_virtual_apic_mode(vcpu);
>
> if (kvm_vcpu_apicv_active(vcpu))
> - __avic_vcpu_load(vcpu, vcpu->cpu, true);
> + __avic_vcpu_load(vcpu, vcpu->cpu, AVIC_ACTIVATE);
> else
> - __avic_vcpu_put(vcpu, true, true);
> + __avic_vcpu_put(vcpu, AVIC_DEACTIVATE);
> }
>
> void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
> @@ -1055,7 +1070,7 @@ void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
> * CPU and cause noisy neighbor problems if the VM is sending interrupts
> * to the vCPU while it's scheduled out.
> */
> - __avic_vcpu_put(vcpu, false, true);
> + __avic_vcpu_put(vcpu, AVIC_START_BLOCKING);
> }
>
> void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
>
> base-commit: fe5b44cf46d5444ff071bc2373fbe7b109a3f60b