Message-ID: <974185d11c41c8019036e153e95a96e0c2712d6c.camel@redhat.com>
Date: Fri, 28 Feb 2025 20:37:37 -0500
From: Maxim Levitsky <mlevitsk@...hat.com>
To: Yosry Ahmed <yosry.ahmed@...ux.dev>, Sean Christopherson
<seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [RFC PATCH 04/13] KVM: SVM: Introduce helpers for updating
TLB_CONTROL
On Wed, 2025-02-05 at 18:23 +0000, Yosry Ahmed wrote:
> Introduce helpers for updating TLB_CONTROL in the VMCB instead of
> directly setting it. Two helpers are introduced:
>
> - svm_add_tlb_ctl_flush(): Combines a new TLB_CONTROL value with the
> existing one.
>
> - svm_clear_tlb_ctl_flush(): Clears the TLB_CONTROL field.
>
> The goal is to prevent overwriting a TLB_CONTROL value with something
> that results in fewer TLB entries being flushed. This does not currently
> happen as KVM only sets TLB_CONTROL_FLUSH_ASID when servicing a flush
> request, and TLB_CONTROL_FLUSH_ALL_ASID when allocating a new ASID. The
> latter always happens after the former, so no unsafe overwrite occurs.
>
> However, future changes may result in subtle bugs where the TLB_CONTROL
> field is incorrectly overwritten. The new helpers prevent that.
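>
> To illustrate the hazard the helpers guard against (a hypothetical
> ordering, not one that exists in the current code):
>
>	/* Direct writes: the broader ALL_ASID flush is silently lost. */
>	vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
>	/* ... later, on another path ... */
>	vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
>
> With svm_add_tlb_ctl_flush(), the second write would instead keep the
> broader TLB_CONTROL_FLUSH_ALL_ASID.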
>
> A separate helper is used for clearing the TLB flush because it is
> semantically different. In this case, KVM knowingly ignores the existing
> value of TLB_CONTROL. Also, although svm_add_tlb_ctl_flush() would
> technically work for TLB_CONTROL_DO_NOTHING, the logic would become
> inconsistent (use the biggest hammer, unless no hammer at all is
> requested).
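>
> A minimal usage sketch of the combining semantics (illustrative only,
> not part of the diff below):
>
>	svm_add_tlb_ctl_flush(vmcb, TLB_CONTROL_FLUSH_ASID);     /* tlb_ctl == 3 */
>	svm_add_tlb_ctl_flush(vmcb, TLB_CONTROL_FLUSH_ALL_ASID); /* tlb_ctl == 1, broader flush wins */
>	svm_add_tlb_ctl_flush(vmcb, TLB_CONTROL_FLUSH_ASID);     /* still 1, never downgraded */
>	svm_clear_tlb_ctl_flush(vmcb);                           /* tlb_ctl == 0, i.e. DO_NOTHING */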
>
> Opportunistically move the TLB_CONTROL_* definitions to
> arch/x86/kvm/svm/svm.h as they are not used outside of
> arch/x86/kvm/svm/.
>
> No functional change intended.
>
> Signed-off-by: Yosry Ahmed <yosry.ahmed@...ux.dev>
> ---
> arch/x86/include/asm/svm.h | 6 ------
> arch/x86/kvm/svm/nested.c | 2 +-
> arch/x86/kvm/svm/sev.c | 2 +-
> arch/x86/kvm/svm/svm.c | 6 +++---
> arch/x86/kvm/svm/svm.h | 29 +++++++++++++++++++++++++++++
> 5 files changed, 34 insertions(+), 11 deletions(-)
>
> diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
> index 2b59b9951c90e..e6bccf8f90982 100644
> --- a/arch/x86/include/asm/svm.h
> +++ b/arch/x86/include/asm/svm.h
> @@ -169,12 +169,6 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
> };
> };
>
> -
> -#define TLB_CONTROL_DO_NOTHING 0
> -#define TLB_CONTROL_FLUSH_ALL_ASID 1
> -#define TLB_CONTROL_FLUSH_ASID 3
> -#define TLB_CONTROL_FLUSH_ASID_LOCAL 7
> -
> #define V_TPR_MASK 0x0f
>
> #define V_IRQ_SHIFT 8
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index 2eba36af44f22..0e9b0592c1f83 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -690,7 +690,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
> /* Done at vmrun: asid. */
>
> /* Also overwritten later if necessary. */
> - vmcb02->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
> + svm_clear_tlb_ctl_flush(vmcb02);
>
> /* nested_cr3. */
> if (nested_npt_enabled(svm))
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index b0adfd0537d00..3af296d6c04f6 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -3481,7 +3481,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
> return;
>
> sd->sev_vmcbs[asid] = svm->vmcb;
> - svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
> + svm_add_tlb_ctl_flush(svm->vmcb, TLB_CONTROL_FLUSH_ASID);
> vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
> }
>
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 2108b48ba4959..a2d601cd4c283 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -1985,7 +1985,7 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
> if (sd->next_asid > sd->max_asid) {
> ++sd->asid_generation;
> sd->next_asid = sd->min_asid;
> - svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
> + svm_add_tlb_ctl_flush(svm->vmcb, TLB_CONTROL_FLUSH_ALL_ASID);
> vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
> }
>
> @@ -3974,7 +3974,7 @@ static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu, struct kvm_vmcb_info *vmcb
> * VM-Exit (via kvm_mmu_reset_context()).
> */
> if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
> - vmcb->ptr->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
> + svm_add_tlb_ctl_flush(vmcb->ptr, TLB_CONTROL_FLUSH_ASID);
> else
> vmcb->asid_generation--;
> }
> @@ -4317,7 +4317,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
> svm->nested.nested_run_pending = 0;
> }
>
> - svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
> + svm_clear_tlb_ctl_flush(svm->vmcb);
> vmcb_mark_all_clean(svm->vmcb);
>
> /* if exit due to PF check for async PF */
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index ebbb0b1a64676..6a73d6ed1e428 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -611,6 +611,35 @@ void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
> void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
> int trig_mode, int vec);
>
> +#define TLB_CONTROL_DO_NOTHING 0
> +#define TLB_CONTROL_FLUSH_ALL_ASID 1
> +#define TLB_CONTROL_FLUSH_ASID 3
> +#define TLB_CONTROL_FLUSH_ASID_LOCAL 7
> +
> +/*
> + * Clearing TLB flushes is done separately because combining
> + * TLB_CONTROL_DO_NOTHING with others is counter-intuitive.
> + */
> +static inline void svm_add_tlb_ctl_flush(struct vmcb *vmcb, u8 tlb_ctl)
> +{
> + if (WARN_ON_ONCE(tlb_ctl == TLB_CONTROL_DO_NOTHING))
> + return;
> +
> + /*
> + * Apply the least targeted (most inclusive) TLB flush. Apart from
> + * TLB_CONTROL_DO_NOTHING, lower values of tlb_ctl are less targeted.
> + */
> + if (vmcb->control.tlb_ctl == TLB_CONTROL_DO_NOTHING)
> + vmcb->control.tlb_ctl = tlb_ctl;
> + else
> + vmcb->control.tlb_ctl = min(vmcb->control.tlb_ctl, tlb_ctl);
> +}
> +
> +static inline void svm_clear_tlb_ctl_flush(struct vmcb *vmcb)
> +{
> + vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
> +}
> +
> /* nested.c */
>
> #define NESTED_EXIT_HOST 0 /* Exit handled on host level */
Reviewed-by: Maxim Levitsky <mlevitsk@...hat.com>

Best regards,
	Maxim Levitsky