Message-ID: <3924d878-c8f1-06f7-1051-19864708211e@redhat.com>
Date:   Fri, 4 Aug 2017 17:40:48 +0200
From:   Paolo Bonzini <pbonzini@...hat.com>
To:     Radim Krčmář <rkrcmar@...hat.com>,
        linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc:     David Hildenbrand <david@...hat.com>
Subject: Re: [PATCH RFC 1/2] KVM: x86: generalize guest_cpuid_has_ helpers

On 02/08/2017 22:41, Radim Krčmář wrote:
> This patch turns guest_cpuid_has_XYZ(cpuid) into guest_cpuid_has(cpuid,
> X86_FEATURE_XYZ), which gets rid of many very similar helpers.
> 
> Given an X86_FEATURE_*, we can determine which CPUID leaf and register
> it belongs to, but this mapping isn't available in common code, so we
> recreate it for KVM.
> 
> Add some BUILD_BUG_ONs to make sure that the lookup is resolved at
> compile time and that only valid feature words can be passed in.
> 
> Signed-off-by: Radim Krčmář <rkrcmar@...hat.com>
> ---
>  arch/x86/kvm/cpuid.h | 202 +++++++++++++++++----------------------------------
>  arch/x86/kvm/mmu.c   |   7 +-
>  arch/x86/kvm/mtrr.c  |   2 +-
>  arch/x86/kvm/svm.c   |   2 +-
>  arch/x86/kvm/vmx.c   |  26 +++----
>  arch/x86/kvm/x86.c   |  38 +++++-----
>  6 files changed, 105 insertions(+), 172 deletions(-)
> 
> diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
> index da6728383052..3b17d915b608 100644
> --- a/arch/x86/kvm/cpuid.h
> +++ b/arch/x86/kvm/cpuid.h
> @@ -3,6 +3,7 @@
>  
>  #include "x86.h"
>  #include <asm/cpu.h>
> +#include <asm/processor.h>
>  
>  int kvm_update_cpuid(struct kvm_vcpu *vcpu);
>  bool kvm_mpx_supported(void);
> @@ -29,95 +30,78 @@ static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
>  	return vcpu->arch.maxphyaddr;
>  }
>  
> -static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
> +struct cpuid_reg {
> +	u32 function;
> +	u32 index;
> +	int reg;
> +};
> +
> +static const struct cpuid_reg reverse_cpuid[] = {
> +	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
> +	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
> +	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
> +	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
> +	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
> +	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
> +	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
> +	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
> +	[CPUID_F_0_EDX]       = {       0xf, 0, CPUID_EDX},
> +	[CPUID_F_1_EDX]       = {       0xf, 1, CPUID_EDX},
> +	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
> +	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
> +	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
> +	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
> +	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
> +};
> +
> +static inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
> +{
> +	unsigned x86_leaf = x86_feature / 32;
> +
> +	BUILD_BUG_ON(!__builtin_constant_p(x86_leaf));
> +	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
> +	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
> +
> +	return reverse_cpuid[x86_leaf];
> +}
> +
> +static inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
>  {
>  	struct kvm_cpuid_entry2 *best;
> +	struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
>  
> -	if (!static_cpu_has(X86_FEATURE_XSAVE))
> +	best = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
> +	if (!best)
> +		return NULL;
> +
> +	switch (cpuid.reg) {
> +	case CPUID_EAX:
> +		return &best->eax;
> +	case CPUID_EBX:
> +		return &best->ebx;
> +	case CPUID_ECX:
> +		return &best->ecx;
> +	case CPUID_EDX:
> +		return &best->edx;
> +	default:
> +		BUILD_BUG();
> +		return NULL;
> +	}
> +}

Wow, I didn't expect the compiler to be able to inline all of this and
even do BUILD_BUG_ON()s on array lookups.  Maybe change inline to
__always_inline just to be safe?
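
Concretely: plain "inline" is only a hint, so if the compiler ever
emitted an out-of-line copy, x86_leaf would stop being a compile-time
constant and the __builtin_constant_p() check would fail the build.
The suggested annotation would look like this (same body as in the
patch, only the qualifier changes):

	static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
	{
		unsigned x86_leaf = x86_feature / 32;

		/*
		 * All three checks depend on constant propagation from the
		 * caller; __always_inline guarantees it even when the
		 * optimizer would otherwise decline to inline.
		 */
		BUILD_BUG_ON(!__builtin_constant_p(x86_leaf));
		BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
		BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);

		return reverse_cpuid[x86_leaf];
	}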

If anybody complains, we can turn the BUILD_BUG() into a plain BUG().
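That is, the default: arm of the switch in guest_cpuid_get_register()
would trade the compile-time check for a runtime one (sketch):

	default:
		BUG();	/* reverse_cpuid only names EAX/EBX/ECX/EDX */
		return NULL;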

Paolo
