Message-Id: <20221125040604.5051-5-weijiang.yang@intel.com>
Date: Thu, 24 Nov 2022 23:05:53 -0500
From: Yang Weijiang <weijiang.yang@...el.com>
To: seanjc@...gle.com, pbonzini@...hat.com, jmattson@...gle.com,
kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc: like.xu.linux@...il.com, kan.liang@...ux.intel.com,
wei.w.wang@...el.com, weijiang.yang@...el.com
Subject: [PATCH v2 04/15] KVM: PMU: disable LBR handling if architectural LBR is available
From: Paolo Bonzini <pbonzini@...hat.com>

Traditional LBR is absent on CPU models that have architectural LBR, so
disable all processing of traditional LBR MSRs if they are not there.

Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
Signed-off-by: Yang Weijiang <weijiang.yang@...el.com>
---
arch/x86/kvm/vmx/pmu_intel.c | 32 ++++++++++++++++++++++----------
1 file changed, 22 insertions(+), 10 deletions(-)
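
[Editor's note: for readers without the SDM open, "CPU models that have
architectural LBR" are those that set CPUID.(EAX=07H,ECX=0):EDX[19]; the
kernel exposes that bit as X86_FEATURE_ARCH_LBR, and this patch checks it
per guest with guest_cpuid_has().  A minimal userspace sketch, for
illustration only, that reads the same enumeration bit on the host:

  /* arch_lbr_check.c: query CPUID leaf 7, subleaf 0, EDX bit 19. */
  #include <stdio.h>
  #include <cpuid.h>

  int main(void)
  {
      unsigned int eax, ebx, ecx, edx;

      /* __get_cpuid_count() returns 0 if leaf 7 is unsupported. */
      if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
          return 1;

      printf("architectural LBR: %s\n",
             (edx & (1u << 19)) ? "present" : "absent");
      return 0;
  }

On a host that prints "absent", the traditional LBR MSR handling kept by
this patch remains in effect; on one that prints "present", the guest can
be offered arch LBR and the legacy paths below are skipped.]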
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index e5cec07ca8d9..905673228932 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -170,19 +170,23 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
 static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
 {
 	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
-	bool ret = false;
 
 	if (!intel_pmu_lbr_is_enabled(vcpu))
-		return ret;
+		return false;
 
-	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
-		(index >= records->from && index < records->from + records->nr) ||
-		(index >= records->to && index < records->to + records->nr);
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
+	    (index == MSR_LBR_SELECT || index == MSR_LBR_TOS))
+		return true;
 
-	if (!ret && records->info)
-		ret = (index >= records->info && index < records->info + records->nr);
+	if ((index >= records->from && index < records->from + records->nr) ||
+	    (index >= records->to && index < records->to + records->nr))
+		return true;
 
-	return ret;
+	if (records->info && index >= records->info &&
+	    index < records->info + records->nr)
+		return true;
+
+	return false;
 }
 
 static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
@@ -702,6 +706,9 @@ static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
 			vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
 	}
 
+	if (guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
+		return;
+
 	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
 	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
 }
@@ -742,10 +749,12 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+	bool lbr_enable = !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
+		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
 
 	if (!lbr_desc->event) {
 		vmx_disable_lbr_msrs_passthrough(vcpu);
-		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
+		if (lbr_enable)
 			goto warn;
 		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
 			goto warn;
@@ -768,7 +777,10 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
 
 static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
 {
-	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
+	bool lbr_enable = !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
+		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
+
+	if (!lbr_enable)
 		intel_pmu_release_guest_lbr_event(vcpu);
 }
 
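
[Editor's note: the two hunks above stop treating IA32_DEBUGCTL[0]
(DEBUGCTLMSR_LBR) as "guest LBRs are on" when the guest has arch LBR,
because that bit only enables legacy LBR; architectural LBR is enabled
through IA32_LBR_CTL[0] (LBREn) instead, which later patches in this
series handle via its own MSR.  A hedged sketch of the distinction
(MSR indices from the SDM; the helper name is invented for illustration):

  #include <stdbool.h>
  #include <stdint.h>

  #define MSR_IA32_DEBUGCTLMSR 0x000001d9  /* bit 0: legacy LBR enable */
  #define MSR_ARCH_LBR_CTL     0x000014ce  /* bit 0: arch LBR LBREn */

  /* Pick the "LBRs enabled" bit that matches the LBR flavor present. */
  static bool lbr_is_enabled(bool has_arch_lbr, uint64_t debugctl,
                             uint64_t arch_lbr_ctl)
  {
      return has_arch_lbr ? (arch_lbr_ctl & 1) : (debugctl & 1);
  }

With that split, DEBUGCTL.LBR says nothing about an arch-LBR guest, which
is why lbr_enable above is forced to false for such guests.]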
--
2.27.0