Message-Id: <20220831223438.413090-14-weijiang.yang@intel.com>
Date: Wed, 31 Aug 2022 18:34:36 -0400
From: Yang Weijiang <weijiang.yang@...el.com>
To: pbonzini@...hat.com, seanjc@...gle.com, kvm@...r.kernel.org
Cc: like.xu.linux@...il.com, kan.liang@...ux.intel.com,
wei.w.wang@...el.com, linux-kernel@...r.kernel.org
Subject: [PATCH 13/15] KVM: x86/vmx: Flip Arch LBREn bit on guest state change
Per the spec: "IA32_LBR_CTL.LBREn is saved and cleared on #SMI, and
restored on RSM. On a warm reset, all LBR MSRs, including
IA32_LBR_DEPTH, have their values preserved. However,
IA32_LBR_CTL.LBREn is cleared to 0, disabling LBRs."

On guest SMM entry, save the guest's IA32_LBR_CTL value in SMRAM and
clear LBREn in the VMCS; on SMM exit (RSM), do the reverse: restore
IA32_LBR_CTL from SMRAM and set LBREn again. Also clear LBREn on warm
reset.

Suggested-by: Paolo Bonzini <pbonzini@...hat.com>
Signed-off-by: Yang Weijiang <weijiang.yang@...el.com>
Message-Id: <20220517154100.29983-15-weijiang.yang@...el.com>
Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
---
arch/x86/kvm/vmx/vmx.c | 26 ++++++++++++++++++++++++++
1 file changed, 26 insertions(+)
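
Note for readers following along: the warm-reset (INIT) path in
vmx_vcpu_reset() below calls disable_arch_lbr_ctl(), which is
introduced earlier in this series and is not part of this diff.  A
minimal sketch of its assumed shape, reconstructed from how it is used
here rather than copied from the series:

	/*
	 * Sketch only: assumed shape of the helper added earlier in
	 * this series.  It clears IA32_LBR_CTL.LBREn in the guest VMCS
	 * so LBRs are disabled across a warm reset, matching the spec
	 * text quoted in the commit message.
	 */
	static void disable_arch_lbr_ctl(struct kvm_vcpu *vcpu)
	{
		struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

		if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) && lbr_desc->event) {
			u64 ctl = vmcs_read64(GUEST_IA32_LBR_CTL);

			vmcs_write64(GUEST_IA32_LBR_CTL,
				     ctl & ~ARCH_LBR_CTL_LBREN);
		}
	}
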
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index dddba2a48542..82b1bde382bb 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4827,6 +4827,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	if (!init_event) {
 		if (cpu_has_vmx_arch_lbr())
 			vmcs_write64(GUEST_IA32_LBR_CTL, 0);
+	} else {
+		disable_arch_lbr_ctl(vcpu);
 	}
 }
 
@@ -7967,6 +7969,8 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 
 static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
+	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	/*
@@ -7983,11 +7987,22 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 	vmx->nested.smm.vmxon = vmx->nested.vmxon;
 	vmx->nested.vmxon = false;
 	vmx_clear_hlt(vcpu);
+
+	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
+	    test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use) &&
+	    lbr_desc->event && guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+		u64 ctl = vmcs_read64(GUEST_IA32_LBR_CTL);
+
+		put_smstate(u64, smstate, 0x7f10, ctl);
+		vmcs_write64(GUEST_IA32_LBR_CTL, ctl & ~ARCH_LBR_CTL_LBREN);
+	}
+
 	return 0;
 }
 
 static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
+	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int ret;
 
@@ -8004,6 +8019,17 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 		vmx->nested.nested_run_pending = 1;
 		vmx->nested.smm.guest_mode = false;
 	}
+
+	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
+	    guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+		u64 ctl = GET_SMSTATE(u64, smstate, 0x7f10);
+
+		vmcs_write64(GUEST_IA32_LBR_CTL, ctl | ARCH_LBR_CTL_LBREN);
+
+		if (intel_pmu_lbr_is_enabled(vcpu) && !lbr_desc->event)
+			intel_pmu_create_guest_lbr_event(vcpu);
+	}
+
 	return 0;
 }
 
--
2.27.0
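
A note on the 0x7f10 offset used by the SMM entry/exit hunks above:
smstate points at KVM's cached copy of the SMRAM state-save area, and
put_smstate()/GET_SMSTATE() index it relative to the architectural
base offset 0x7e00.  From memory, the accessors in arch/x86/kvm/x86.h
look roughly like the sketch below (treat it as an illustration, not a
quotation), and 0x7f10 is assumed to be a slot the 64-bit state-save
map does not otherwise use, which is what makes it safe to stash
IA32_LBR_CTL there:

	/*
	 * Assumed shape of KVM's state-save accessors: the buffer
	 * caches the state-save area whose architectural base is
	 * offset 0x7e00, so the 0x7f10 slot used by this patch lands
	 * at byte 0x110 of the buffer.
	 */
	#define put_smstate(type, buf, offset, val)		\
		*(type *)((buf) + (offset) - 0x7e00) = val

	#define GET_SMSTATE(type, buf, offset)			\
		*(type *)((buf) + (offset) - 0x7e00)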