Date:   Tue, 31 May 2022 13:54:49 -0400
From:   Paolo Bonzini <pbonzini@...hat.com>
To:     linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc:     likexu@...cent.com
Subject: [PATCH 1/2] KVM: vmx, pmu: accept 0 for absent MSRs when host-initiated

Whenever an MSR is included in KVM_GET_MSR_INDEX_LIST, as is the case for
MSR_IA32_DS_AREA, MSR_ARCH_LBR_DEPTH and MSR_ARCH_LBR_CTL, it must always
be settable with KVM_SET_MSR.  Accept a zero value for these MSRs to
honor that contract.

Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
---
 arch/x86/kvm/vmx/pmu_intel.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)
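
Not part of the patch, only an illustration: the contract stated in the
changelog can be exercised from userspace roughly as sketched below.  The
ioctls and structures (KVM_GET_MSR_INDEX_LIST, KVM_SET_MSRS,
struct kvm_msr_list, struct kvm_msrs) are the standard KVM API; the test
flow itself is just a sketch, with error handling omitted and no CPUID
configured for the vCPU.

/*
 * Sketch: every MSR reported by KVM_GET_MSR_INDEX_LIST should accept a
 * host-initiated write of value 0 via KVM_SET_MSRS.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

	/* Probe with nmsrs == 0; KVM reports the required count in nmsrs. */
	struct kvm_msr_list probe = { .nmsrs = 0 };
	ioctl(kvm, KVM_GET_MSR_INDEX_LIST, &probe);

	struct kvm_msr_list *list = calloc(1, sizeof(*list) +
					      probe.nmsrs * sizeof(__u32));
	list->nmsrs = probe.nmsrs;
	ioctl(kvm, KVM_GET_MSR_INDEX_LIST, list);

	/* One entry per KVM_SET_MSRS call so a failure is attributable. */
	struct kvm_msrs *req = calloc(1, sizeof(*req) +
					 sizeof(struct kvm_msr_entry));
	req->nmsrs = 1;

	for (__u32 i = 0; i < list->nmsrs; i++) {
		req->entries[0].index = list->indices[i];
		req->entries[0].data = 0;

		/* KVM_SET_MSRS returns the number of MSRs it processed. */
		if (ioctl(vcpu, KVM_SET_MSRS, req) != 1)
			printf("MSR 0x%x rejected value 0\n",
			       list->indices[i]);
	}
	return 0;
}

Writing one entry per KVM_SET_MSRS call keeps it obvious which index, if
any, still rejects a zero write.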

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 3e04d0407605..66496cb41494 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -367,8 +367,9 @@ static bool arch_lbr_depth_is_valid(struct kvm_vcpu *vcpu, u64 depth)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 
-	if (!kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
-		return false;
+	if (!kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) ||
+	    !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
+		return depth == 0;
 
 	return (depth == pmu->kvm_arch_lbr_depth);
 }
@@ -378,7 +379,7 @@ static bool arch_lbr_ctl_is_valid(struct kvm_vcpu *vcpu, u64 ctl)
 	struct kvm_cpuid_entry2 *entry;
 
 	if (!kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
-		return false;
+		return ctl == 0;
 
 	if (ctl & ~KVM_ARCH_LBR_CTL_MASK)
 		goto warn;
@@ -510,6 +511,8 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		}
 		break;
 	case MSR_IA32_DS_AREA:
+		if (msr_info->host_initiated && data && !guest_cpuid_has(vcpu, X86_FEATURE_DS))
+			return 1;
 		if (is_noncanonical_address(data, vcpu))
 			return 1;
 		pmu->ds_area = data;
@@ -525,7 +528,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_ARCH_LBR_DEPTH:
 		if (!arch_lbr_depth_is_valid(vcpu, data))
 			return 1;
+
 		lbr_desc->records.nr = data;
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
+			return 0;
+
 		/*
 	 * Writing the depth MSR from the guest could either set the
 	 * MSR or reset the LBR records as a side effect.
@@ -535,6 +542,8 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_ARCH_LBR_CTL:
 		if (!arch_lbr_ctl_is_valid(vcpu, data))
 			break;
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
+			return 0;
 
 		vmcs_write64(GUEST_IA32_LBR_CTL, data);
 
-- 
2.31.1

