lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <87h65x8krc.fsf@redhat.com>
Date: Fri, 17 Jan 2025 17:31:35 +0100
From: Vitaly Kuznetsov <vkuznets@...hat.com>
To: Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini
 <pbonzini@...hat.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org, Dongjie Zou
 <zoudongjie@...wei.com>, stable@...r.kernel.org
Subject: Re: [PATCH 4/5] KVM: selftests: Manage CPUID array in Hyper-V CPUID
 test's core helper

Sean Christopherson <seanjc@...gle.com> writes:

> Allocate, get, and free the CPUID array in the Hyper-V CPUID test in the
> test's core helper, instead of copy+pasting code at each call site.  In
> addition to deduplicating a small amount of code, restricting visibility
> of the array to a single invocation of the core test prevents "leaking" an
> array across test cases.  Passing in @vcpu to the helper will also allow
> pivoting on VM-scoped information without needing to pass more booleans,
> e.g. to conditionally assert on features that require an in-kernel APIC.
>
> Cc: Vitaly Kuznetsov <vkuznets@...hat.com>
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
> ---
>  .../selftests/kvm/x86_64/hyperv_cpuid.c       | 25 ++++++++-----------
>  1 file changed, 11 insertions(+), 14 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
> index 09f9874d7705..90c44765d584 100644
> --- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
> +++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
> @@ -41,13 +41,18 @@ static bool smt_possible(void)
>  	return res;
>  }
>  
> -static void test_hv_cpuid(const struct kvm_cpuid2 *hv_cpuid_entries,
> -			  bool evmcs_expected)
> +static void test_hv_cpuid(struct kvm_vcpu *vcpu, bool evmcs_expected)
>  {
> +	const struct kvm_cpuid2 *hv_cpuid_entries;
>  	int i;
>  	int nent_expected = 10;
>  	u32 test_val;
>  
> +	if (vcpu)
> +		hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vcpu);
> +	else
> +		hv_cpuid_entries = kvm_get_supported_hv_cpuid();
> +
>  	TEST_ASSERT(hv_cpuid_entries->nent == nent_expected,
>  		    "KVM_GET_SUPPORTED_HV_CPUID should return %d entries"
>  		    " (returned %d)",
> @@ -109,6 +114,7 @@ static void test_hv_cpuid(const struct kvm_cpuid2 *hv_cpuid_entries,
>  		 *	entry->edx);
>  		 */
>  	}
> +	free((void *)hv_cpuid_entries);

(see my comment on "[PATCH 3/5] KVM: selftests: Explicitly free CPUID
array at end of Hyper-V CPUID test"): 

vcpu_get_supported_hv_cpuid() allocates memory for the resulting array
each time; however, kvm_get_supported_hv_cpuid() doesn't, so freeing
hv_cpuid_entries here will result in returning already-freed memory the
next time the kvm_get_supported_hv_cpuid() path is taken.

>  }
>  
>  static void test_hv_cpuid_e2big(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
> @@ -129,7 +135,6 @@ static void test_hv_cpuid_e2big(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
>  int main(int argc, char *argv[])
>  {
>  	struct kvm_vm *vm;
> -	const struct kvm_cpuid2 *hv_cpuid_entries;
>  	struct kvm_vcpu *vcpu;
>  
>  	TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_CPUID));
> @@ -138,10 +143,7 @@ int main(int argc, char *argv[])
>  
>  	/* Test vCPU ioctl version */
>  	test_hv_cpuid_e2big(vm, vcpu);
> -
> -	hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vcpu);
> -	test_hv_cpuid(hv_cpuid_entries, false);
> -	free((void *)hv_cpuid_entries);
> +	test_hv_cpuid(vcpu, false);
>  
>  	if (!kvm_cpu_has(X86_FEATURE_VMX) ||
>  	    !kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
> @@ -149,9 +151,7 @@ int main(int argc, char *argv[])
>  		goto do_sys;
>  	}
>  	vcpu_enable_evmcs(vcpu);
> -	hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vcpu);
> -	test_hv_cpuid(hv_cpuid_entries, true);
> -	free((void *)hv_cpuid_entries);
> +	test_hv_cpuid(vcpu, true);
>  
>  do_sys:
>  	/* Test system ioctl version */
> @@ -161,10 +161,7 @@ int main(int argc, char *argv[])
>  	}
>  
>  	test_hv_cpuid_e2big(vm, NULL);
> -
> -	hv_cpuid_entries = kvm_get_supported_hv_cpuid();
> -	test_hv_cpuid(hv_cpuid_entries, kvm_cpu_has(X86_FEATURE_VMX));
> -	free((void *)hv_cpuid_entries);
> +	test_hv_cpuid(NULL, kvm_cpu_has(X86_FEATURE_VMX));
>  
>  out:
>  	kvm_vm_free(vm);

-- 
Vitaly


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ