Message-Id: <20220831223438.413090-5-weijiang.yang@intel.com>
Date: Wed, 31 Aug 2022 18:34:27 -0400
From: Yang Weijiang <weijiang.yang@...el.com>
To: pbonzini@...hat.com, seanjc@...gle.com, kvm@...r.kernel.org
Cc: like.xu.linux@...il.com, kan.liang@...ux.intel.com,
wei.w.wang@...el.com, linux-kernel@...r.kernel.org
Subject: [PATCH 04/15] KVM: PMU: disable LBR handling if architectural LBR is available

From: Paolo Bonzini <pbonzini@...hat.com>

Traditional LBR is absent on CPU models that have architectural LBR, so
disable all handling of the traditional LBR MSRs when the guest
enumerates architectural LBR and those MSRs therefore do not exist.
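
For reference, the MSR-validity helper after this patch reads roughly
as follows (reconstructed from the first hunk below; the comments are
added here for illustration only):

static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
{
	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);

	if (!intel_pmu_lbr_is_enabled(vcpu))
		return false;

	/* MSR_LBR_SELECT and MSR_LBR_TOS exist only without arch LBR. */
	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
	    (index == MSR_LBR_SELECT || index == MSR_LBR_TOS))
		return true;

	/* The from/to/info ranges cover whichever LBR flavor is in use. */
	if ((index >= records->from && index < records->from + records->nr) ||
	    (index >= records->to && index < records->to + records->nr))
		return true;

	if (records->info && index >= records->info &&
	    index < records->info + records->nr)
		return true;

	return false;
}
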
Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
---
arch/x86/kvm/vmx/pmu_intel.c | 32 ++++++++++++++++++++++----------
 1 file changed, 22 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index c399637a3a79..89cb75bb0280 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -174,19 +174,23 @@ static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
 static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
 {
 	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
-	bool ret = false;
 
 	if (!intel_pmu_lbr_is_enabled(vcpu))
-		return ret;
+		return false;
 
-	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
-		(index >= records->from && index < records->from + records->nr) ||
-		(index >= records->to && index < records->to + records->nr);
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
+	    (index == MSR_LBR_SELECT || index == MSR_LBR_TOS))
+		return true;
 
-	if (!ret && records->info)
-		ret = (index >= records->info && index < records->info + records->nr);
+	if ((index >= records->from && index < records->from + records->nr) ||
+	    (index >= records->to && index < records->to + records->nr))
+		return true;
 
-	return ret;
+	if (records->info && index >= records->info &&
+	    index < records->info + records->nr)
+		return true;
+
+	return false;
 }
 
 static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
@@ -703,6 +707,9 @@ static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
 			vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
 	}
 
+	if (guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR))
+		return;
+
 	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
 	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
 }
@@ -743,10 +750,12 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+	bool lbr_enable = !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
+		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
 
 	if (!lbr_desc->event) {
 		vmx_disable_lbr_msrs_passthrough(vcpu);
-		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
+		if (lbr_enable)
 			goto warn;
 		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
 			goto warn;
@@ -769,7 +778,10 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
 
 static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
 {
-	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
+	bool lbr_enable = !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
+		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);
+
+	if (!lbr_enable)
 		intel_pmu_release_guest_lbr_event(vcpu);
 }
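
A note on the cleanup path, with a minimal sketch assembled from the
final hunk above: a guest that enumerates architectural LBR always
computes lbr_enable as false here, because DEBUGCTLMSR_LBR gates only
the traditional LBR stack, so its guest LBR perf event is released on
cleanup:

static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	/* DEBUGCTL.LBR controls only the traditional LBR stack. */
	bool lbr_enable = !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR) &&
		(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR);

	if (!lbr_enable)
		intel_pmu_release_guest_lbr_event(vcpu);
}
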
--
2.27.0