Message-Id: <20221125040604.5051-14-weijiang.yang@intel.com>
Date: Thu, 24 Nov 2022 23:06:02 -0500
From: Yang Weijiang <weijiang.yang@...el.com>
To: seanjc@...gle.com, pbonzini@...hat.com, jmattson@...gle.com,
kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc: like.xu.linux@...il.com, kan.liang@...ux.intel.com,
wei.w.wang@...el.com, weijiang.yang@...el.com
Subject: [PATCH v2 13/15] KVM: x86/vmx: Save/restore guest Arch LBR Ctrl MSR at SMM entry/exit
Per SDM 3B Chapter 18: "IA32_LBR_CTL.LBREn is saved and cleared on #SMI,
and restored on RSM", store the guest IA32_LBR_CTL value in SMRAM and
clear LBREn in the VMCS at SMM entry, then do the reverse at SMM exit.
Suggested-by: Paolo Bonzini <pbonzini@...hat.com>
Signed-off-by: Yang Weijiang <weijiang.yang@...el.com>
---
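(Reviewer note, not intended for the commit log: below is a minimal
user-space model of the save/clear/restore flow this patch implements.
The helper names smm_enter_lbr()/smm_leave_lbr() and the LBR_CTL_LBREN
define are illustrative only, not KVM APIs; the real code operates on
the GUEST_IA32_LBR_CTL VMCS field and the 64-bit SMRAM image. As in
vmx_leave_smm() below, the restore path writes back only the LBREn bit
of the saved value.)

#include <stdint.h>
#include <stdio.h>

/* Bit 0 of IA32_LBR_CTL: LBREn, enables Arch LBR recording. */
#define LBR_CTL_LBREN (1ULL << 0)

/*
 * Model of #SMI entry: stash the whole MSR image in "SMRAM", then run
 * the SMM handler with LBREn cleared so it cannot record its own
 * branches.
 */
static uint64_t smm_enter_lbr(uint64_t guest_ctl, uint64_t *smram_slot)
{
	*smram_slot = guest_ctl;
	return guest_ctl & ~LBR_CTL_LBREN;
}

/* Model of RSM: bring LBREn back from the image saved at #SMI. */
static uint64_t smm_leave_lbr(uint64_t smram_slot)
{
	return smram_slot & LBR_CTL_LBREN;
}

int main(void)
{
	uint64_t smram = 0;
	uint64_t ctl = LBR_CTL_LBREN;	/* guest is recording branches */

	ctl = smm_enter_lbr(ctl, &smram);
	printf("in SMM:    LBREn=%d\n", (int)(ctl & LBR_CTL_LBREN));
	ctl = smm_leave_lbr(smram);
	printf("after RSM: LBREn=%d\n", (int)(ctl & LBR_CTL_LBREN));
	return 0;
}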
arch/x86/kvm/smm.c | 1 +
arch/x86/kvm/smm.h | 3 ++-
arch/x86/kvm/vmx/vmx.c | 22 ++++++++++++++++++++++
3 files changed, 25 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index a9c1c2af8d94..5987090b440f 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -86,6 +86,7 @@ static void check_smram_offsets(void)
CHECK_SMRAM64_OFFSET(smm_revison, 0xFEFC);
CHECK_SMRAM64_OFFSET(smbase, 0xFF00);
CHECK_SMRAM64_OFFSET(reserved4, 0xFF04);
+ CHECK_SMRAM64_OFFSET(arch_lbr_ctl, 0xFF10);
CHECK_SMRAM64_OFFSET(ssp, 0xFF18);
CHECK_SMRAM64_OFFSET(svm_guest_pat, 0xFF20);
CHECK_SMRAM64_OFFSET(svm_host_efer, 0xFF28);
diff --git a/arch/x86/kvm/smm.h b/arch/x86/kvm/smm.h
index a1cf2ac5bd78..5a6479205d91 100644
--- a/arch/x86/kvm/smm.h
+++ b/arch/x86/kvm/smm.h
@@ -114,7 +114,8 @@ struct kvm_smram_state_64 {
u32 reserved3[3];
u32 smm_revison;
u32 smbase;
- u32 reserved4[5];
+ u32 reserved4[3];
+ u64 arch_lbr_ctl;

/* ssp and svm_* fields below are not implemented by KVM */
u64 ssp;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6ad765ea4059..cc782233c075 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8006,11 +8006,21 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
vmx->nested.smm.vmxon = vmx->nested.vmxon;
vmx->nested.vmxon = false;
vmx_clear_hlt(vcpu);
+
+ if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
+ guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+ u64 ctl = vmcs_read64(GUEST_IA32_LBR_CTL);
+
+ smram->smram64.arch_lbr_ctl = ctl;
+ vmcs_write64(GUEST_IA32_LBR_CTL, ctl & ~ARCH_LBR_CTL_LBREN);
+ }
+
return 0;
}

static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
{
+ struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
int ret;
@@ -8027,6 +8037,18 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
vmx->nested.nested_run_pending = 1;
vmx->nested.smm.guest_mode = false;
}
+
+ if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
+ guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+ u64 ctl = smram->smram64.arch_lbr_ctl;
+
+ vmcs_write64(GUEST_IA32_LBR_CTL, ctl & ARCH_LBR_CTL_LBREN);
+
+ if (intel_pmu_lbr_is_enabled(vcpu) &&
+ (ctl & ARCH_LBR_CTL_LBREN) && !lbr_desc->event)
+ intel_pmu_create_guest_lbr_event(vcpu);
+ }
+
return 0;
}
--
2.27.0