Date:   Sun, 23 Feb 2020 10:09:58 +0100
From:   Paolo Bonzini <pbonzini@...hat.com>
To:     Sean Christopherson <sean.j.christopherson@...el.com>
Cc:     Vitaly Kuznetsov <vkuznets@...hat.com>,
        Wanpeng Li <wanpengli@...cent.com>,
        Jim Mattson <jmattson@...gle.com>,
        Joerg Roedel <joro@...tes.org>, kvm@...r.kernel.org,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH 3/3] KVM: x86: Consolidate VM allocation and free for VMX
 and SVM

On 27/01/20 01:41, Sean Christopherson wrote:
> Move the VM allocation and free code to common x86 as the logic is
> more or less identical across SVM and VMX.
> 
> Note, although hyperv.hv_pa_pg is part of the common kvm->arch, it's
> (currently) only allocated by VMX VMs.  But, since kfree() plays nice
> when passed a NULL pointer, the superfluous call for SVM is harmless
> and avoids future churn if SVM gains support for HyperV's direct TLB
> flush.
> 
> Signed-off-by: Sean Christopherson <sean.j.christopherson@...el.com>

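The "harmless free" argument above relies on kfree(NULL) being a
defined no-op, so the common free path can release the Hyper-V assist
page unconditionally for both vendors.  The patch body isn't quoted
here, but the consolidated helper presumably ends up along these lines
(a sketch, not the actual code):

	void kvm_arch_free_vm(struct kvm *kvm)
	{
		/*
		 * kfree(NULL) is a no-op, so this is safe even on SVM,
		 * which never allocates hv_pa_pg today.
		 */
		kfree(kvm->arch.hyperv.hv_pa_pg);

		/* Pairs with the __vmalloc() in kvm_arch_alloc_vm(). */
		vfree(kvm);
	}
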
Queued, thanks.  Might as well make vm_size a plain field instead of a
function pointer, sacrificing the BUILD_BUG_ONs:

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 24b87e2691c5..f8b45cc0bf49 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1059,7 +1059,7 @@ struct kvm_x86_ops {
 	bool (*has_emulated_msr)(int index);
 	void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
-	unsigned int (*vm_size)(void);
+	unsigned int vm_size;
 	int (*vm_init)(struct kvm *kvm);
 	void (*vm_destroy)(struct kvm *kvm);
 
@@ -1276,7 +1276,7 @@ struct kvm_arch_async_pf {
 #define __KVM_HAVE_ARCH_VM_ALLOC
 static inline struct kvm *kvm_arch_alloc_vm(void)
 {
-	return __vmalloc(kvm_x86_ops->vm_size(),
+	return __vmalloc(kvm_x86_ops->vm_size,
 			 GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL);
 }
 void kvm_arch_free_vm(struct kvm *kvm);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a5a136e986e9..660387d6caf0 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1955,13 +1955,6 @@ static void __unregister_enc_region_locked(struct kvm *kvm,
 	kfree(region);
 }
 
-static unsigned int svm_vm_size(void)
-{
-	BUILD_BUG_ON(offsetof(struct kvm_svm, kvm) != 0);
-
-	return sizeof(struct kvm_svm);
-}
-
 static void sev_vm_destroy(struct kvm *kvm)
 {
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
@@ -7399,7 +7392,7 @@ static void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate)
 	.vcpu_free = svm_free_vcpu,
 	.vcpu_reset = svm_vcpu_reset,
 
-	.vm_size = svm_vm_size,
+	.vm_size = sizeof(struct kvm_svm),
 	.vm_init = svm_vm_init,
 	.vm_destroy = svm_vm_destroy,
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 39a4fea03df5..57ac585394b9 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6645,13 +6645,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx_complete_interrupts(vmx);
 }
 
-static unsigned int vmx_vm_size(void)
-{
-	BUILD_BUG_ON(offsetof(struct kvm_vmx, kvm) != 0);
-
-	return sizeof(struct kvm_vmx);
-}
-
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7749,7 +7742,7 @@ static bool vmx_check_apicv_inhibit_reasons(ulong bit)
 	.cpu_has_accelerated_tpr = report_flexpriority,
 	.has_emulated_msr = vmx_has_emulated_msr,
 
-	.vm_size = vmx_vm_size,
+	.vm_size = sizeof(struct kvm_vmx),
 	.vm_init = vmx_vm_init,
 
 	.vcpu_create = vmx_create_vcpu,
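
If the compile-time layout check is worth keeping, it doesn't need the
helper functions: a file-scope assertion next to each struct would do.
Something like this on top of the diff above (a sketch, assuming
static_assert() from <linux/build_bug.h>; not part of the queued
patch):

	/* In svm.c: struct kvm must come first for kvm_arch_alloc_vm(). */
	static_assert(offsetof(struct kvm_svm, kvm) == 0);

	/* Likewise in vmx.c. */
	static_assert(offsetof(struct kvm_vmx, kvm) == 0);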
