Message-ID: <ff689320-19b4-118c-ddea-287487d2f1de@redhat.com>
Date: Mon, 7 Aug 2017 10:36:50 +0200
From: David Hildenbrand <david@...hat.com>
To: Radim Krčmář <rkrcmar@...hat.com>,
linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc: Paolo Bonzini <pbonzini@...hat.com>
Subject: Re: [PATCH v2 3/3] KVM: x86: use general helpers for some cpuid
manipulation
On 05.08.2017 00:12, Radim Krčmář wrote:
> Add guest_cpuid_clear() and use it instead of kvm_find_cpuid_entry().
> Also replace some uses of kvm_find_cpuid_entry() with guest_cpuid_has().
>
> Signed-off-by: Radim Krčmář <rkrcmar@...hat.com>
> ---
> v2: added __always_inline (Paolo)
> ---
>  arch/x86/kvm/cpuid.h |  9 +++++++++
>  arch/x86/kvm/svm.c   |  5 +----
>  arch/x86/kvm/vmx.c   |  6 ++----
>  arch/x86/kvm/x86.c   | 14 ++------------
>  4 files changed, 14 insertions(+), 20 deletions(-)
>
> diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
> index 4e9ac93b4f3a..ac15193e5e52 100644
> --- a/arch/x86/kvm/cpuid.h
> +++ b/arch/x86/kvm/cpuid.h
> @@ -104,6 +104,15 @@ static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_
>  	return *reg & bit(x86_feature);
>  }
> 
> +static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x86_feature)
> +{
> +	int *reg;
you could initialize it directly where it's declared.
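E.g. (untested, just to sketch the suggestion):

	static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x86_feature)
	{
		/* initialize reg right at its declaration */
		int *reg = guest_cpuid_get_register(vcpu, x86_feature);

		if (reg)
			*reg &= ~bit(x86_feature);
	}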
> +
> +	reg = guest_cpuid_get_register(vcpu, x86_feature);
> +	if (reg)
> +		*reg &= ~bit(x86_feature);
> +}
> +
>  static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm_cpuid_entry2 *best;
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index b8196aecbdcc..2432bb952a30 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -5075,7 +5075,6 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
>  static void svm_cpuid_update(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
> -	struct kvm_cpuid_entry2 *entry;
> 
>  	/* Update nrips enabled cache */
>  	svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
> @@ -5083,9 +5082,7 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
>  	if (!kvm_vcpu_apicv_active(vcpu))
>  		return;
> 
> -	entry = kvm_find_cpuid_entry(vcpu, 1, 0);
> -	if (entry)
> -		entry->ecx &= ~bit(X86_FEATURE_X2APIC);
> +	guest_cpuid_clear(vcpu, X86_FEATURE_X2APIC);
>  }
> 
>  static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 85b73c1f963a..1731c7aca464 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -9472,15 +9472,13 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
> 
>  	if (vmx_invpcid_supported()) {
>  		/* Exposing INVPCID only when PCID is exposed */
> -		struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
>  		bool invpcid_enabled =
> -			best && best->ebx & bit(X86_FEATURE_INVPCID) &&
> +			guest_cpuid_has(vcpu, X86_FEATURE_INVPCID) &&
>  			guest_cpuid_has(vcpu, X86_FEATURE_PCID);
> 
>  		if (!invpcid_enabled) {
>  			secondary_exec_ctl &= ~SECONDARY_EXEC_ENABLE_INVPCID;
> -			if (best)
> -				best->ebx &= ~bit(X86_FEATURE_INVPCID);
> +			guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID);
just how I wanted it :)
>  		}
> 
>  		if (nested) {
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index ee4e251c82fc..33fd6b6419ef 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1022,21 +1022,11 @@ bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
>  	if (efer & efer_reserved_bits)
>  		return false;
> 
> -	if (efer & EFER_FFXSR) {
> -		struct kvm_cpuid_entry2 *feat;
> -
> -		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
> -		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
> +	if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
>  			return false;
> -	}
> 
> -	if (efer & EFER_SVME) {
> -		struct kvm_cpuid_entry2 *feat;
> -
> -		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
> -		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
> +	if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
>  			return false;
> -	}
> 
>  	return true;
>  }
>
Very nice cleanup.
Reviewed-by: David Hildenbrand <david@...hat.com>
--
Thanks,
David