Message-ID: <d2e55a12-02d8-8c7c-24de-f049c6e0e445@intel.com>
Date: Thu, 12 May 2022 14:44:10 +0800
From: "Yang, Weijiang" <weijiang.yang@...el.com>
To: Paolo Bonzini <pbonzini@...hat.com>,
"jmattson@...gle.com" <jmattson@...gle.com>,
"seanjc@...gle.com" <seanjc@...gle.com>,
"kan.liang@...ux.intel.com" <kan.liang@...ux.intel.com>,
"like.xu.linux@...il.com" <like.xu.linux@...il.com>,
"vkuznets@...hat.com" <vkuznets@...hat.com>,
"Wang, Wei W" <wei.w.wang@...el.com>,
"kvm@...r.kernel.org" <kvm@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v11 14/16] KVM: x86/vmx: Flip Arch LBREn bit on guest
state change
On 5/10/2022 11:51 PM, Paolo Bonzini wrote:
> On 5/6/22 05:33, Yang Weijiang wrote:
>> Per spec: "IA32_LBR_CTL.LBREn is saved and cleared on #SMI, and restored
>> on RSM. On a warm reset, all LBR MSRs, including IA32_LBR_DEPTH, have their
>> values preserved. However, IA32_LBR_CTL.LBREn is cleared to 0, disabling
>> LBRs." So clear the Arch LBREn bit on #SMI and restore it on RSM manually;
>> also clear the bit when the guest does a warm reset.
>>
>> Signed-off-by: Yang Weijiang <weijiang.yang@...el.com>
>> ---
>> arch/x86/kvm/vmx/vmx.c | 4 ++++
>> 1 file changed, 4 insertions(+)
>>
>> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
>> index 6d6ee9cf82f5..b38f58868905 100644
>> --- a/arch/x86/kvm/vmx/vmx.c
>> +++ b/arch/x86/kvm/vmx/vmx.c
>> @@ -4593,6 +4593,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
>>  	if (!init_event) {
>>  		if (static_cpu_has(X86_FEATURE_ARCH_LBR))
>>  			vmcs_write64(GUEST_IA32_LBR_CTL, 0);
>> +	} else {
>> +		flip_arch_lbr_ctl(vcpu, false);
>>  	}
>>  }
>>
>> @@ -7704,6 +7706,7 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
>>  	vmx->nested.smm.vmxon = vmx->nested.vmxon;
>>  	vmx->nested.vmxon = false;
>>  	vmx_clear_hlt(vcpu);
>> +	flip_arch_lbr_ctl(vcpu, false);
>>  	return 0;
>>  }
>>
>> @@ -7725,6 +7728,7 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
>>  		vmx->nested.nested_run_pending = 1;
>>  		vmx->nested.smm.guest_mode = false;
>>  	}
>> +	flip_arch_lbr_ctl(vcpu, true);
>>  	return 0;
>>  }
>>
> This is incorrect: you are not saving/restoring the actual value of
> LBREn (which is "lbr_desc->event != NULL"). Therefore, a migration
> while in SMM would lose the value of LBREn = true.
>
> Instead of using flip_arch_lbr_ctl, SMM should save the value of the MSR
> in kvm_x86_ops->enter_smm, and restore it in kvm_x86_ops->leave_smm
> (feel free to do it only if guest_cpuid_has(vcpu, X86_FEATURE_LM), i.e.
> the 32-bit case can be ignored).
Hi, Paolo,
I refactored this patch as below to incorporate your suggestion. Could you
kindly take a look? If it looks good to you, I'll refresh this series as
v12. Thanks!
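
To convince myself that the scheme survives a migration taken while the
guest is in SMM, I also put together a small user-space toy model of the
intended flow. It is only an illustrative sketch, not kernel code: the
toy_* names and structures are made up and merely stand in for
GUEST_IA32_LBR_CTL, lbr_desc->event and vmx->lbr_in_smm.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_LBREN	(1ULL << 0)	/* IA32_LBR_CTL.LBREn */
#define TOY_IN_SMM	(1ULL << 63)	/* reserved bit reused as marker */

/* Stand-ins for the bits of vCPU state the scheme touches. */
struct toy_vcpu {
	uint64_t lbr_ctl;	/* GUEST_IA32_LBR_CTL */
	bool lbr_event;		/* lbr_desc->event != NULL */
	bool lbr_in_smm;	/* vmx->lbr_in_smm */
};

/* #SMI: remember LBRs were live, then clear LBREn like hardware does. */
static void toy_enter_smm(struct toy_vcpu *v)
{
	if (v->lbr_event && (v->lbr_ctl & TOY_LBREN))
		v->lbr_in_smm = true;
	v->lbr_ctl &= ~TOY_LBREN;
}

/* RSM: restore LBREn if it was live at #SMI time. */
static void toy_leave_smm(struct toy_vcpu *v)
{
	if (v->lbr_event && v->lbr_in_smm) {
		v->lbr_ctl |= TOY_LBREN;
		v->lbr_in_smm = false;
	}
}

/* get_msr: report the hidden LBREn through the reserved bit. */
static uint64_t toy_get_msr(struct toy_vcpu *v)
{
	if (v->lbr_in_smm)
		return v->lbr_ctl | TOY_LBREN | TOY_IN_SMM;
	return v->lbr_ctl;
}

/* set_msr: recreate the event, then re-hide LBREn and re-arm the flag. */
static void toy_set_msr(struct toy_vcpu *v, uint64_t data)
{
	if (data & TOY_LBREN)
		v->lbr_event = true;
	if (data & TOY_IN_SMM) {
		data &= ~(TOY_LBREN | TOY_IN_SMM);
		v->lbr_in_smm = true;
	}
	v->lbr_ctl = data;
}

int main(void)
{
	struct toy_vcpu src = { .lbr_ctl = TOY_LBREN, .lbr_event = true };
	struct toy_vcpu dst = { 0 };

	toy_enter_smm(&src);			/* guest takes an #SMI */
	toy_set_msr(&dst, toy_get_msr(&src));	/* migrate while in SMM */
	toy_leave_smm(&dst);			/* guest executes RSM */

	/* Prints "LBREn after RSM: 1": the bit survived the migration. */
	printf("LBREn after RSM: %d\n", !!(dst.lbr_ctl & TOY_LBREN));
	return 0;
}

The marker never reaches the VMCS: get_msr adds it on the way out and
set_msr strips it again on the way in.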
======================================================================
From dad3abc7fe96022dd3dcee8f958960bbd4f68b95 Mon Sep 17 00:00:00 2001
From: Yang Weijiang <weijiang.yang@...el.com>
Date: Thu, 5 Aug 2021 20:48:39 +0800
Subject: [PATCH] KVM: x86/vmx: Flip Arch LBREn bit on guest state change
Per spec: "IA32_LBR_CTL.LBREn is saved and cleared on #SMI, and restored
on RSM. On a warm reset, all LBR MSRs, including IA32_LBR_DEPTH, have their
values preserved. However, IA32_LBR_CTL.LBREn is cleared to 0, disabling
LBRs." Use a reserved bit (63) of the MSR to remember that LBREn was set
when it is hidden on #SMI, so that LBREn can be restored on RSM; also
clear the bit when the guest does a warm reset.
Signed-off-by: Yang Weijiang <weijiang.yang@...el.com>
---
arch/x86/kvm/vmx/pmu_intel.c | 17 ++++++++++++++---
arch/x86/kvm/vmx/vmx.c | 24 ++++++++++++++++++++++++
arch/x86/kvm/vmx/vmx.h | 1 +
3 files changed, 39 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 038fdb788ccd..652601ad99ea 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -373,6 +373,8 @@ static bool arch_lbr_depth_is_valid(struct kvm_vcpu *vcpu, u64 depth)
 	return (depth == pmu->kvm_arch_lbr_depth);
 }
 
+#define ARCH_LBR_IN_SMM	BIT(63)
+
 static bool arch_lbr_ctl_is_valid(struct kvm_vcpu *vcpu, u64 ctl)
 {
 	struct kvm_cpuid_entry2 *entry;
@@ -380,7 +382,7 @@ static bool arch_lbr_ctl_is_valid(struct kvm_vcpu *vcpu, u64 ctl)
 	if (!kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
 		return false;
 
-	if (ctl & ~KVM_ARCH_LBR_CTL_MASK)
+	if (ctl & ~(KVM_ARCH_LBR_CTL_MASK | ARCH_LBR_IN_SMM))
 		goto warn;
 
 	entry = kvm_find_cpuid_entry(vcpu, 0x1c, 0);
@@ -425,6 +427,10 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		return 0;
 	case MSR_ARCH_LBR_CTL:
 		msr_info->data = vmcs_read64(GUEST_IA32_LBR_CTL);
+		if (to_vmx(vcpu)->lbr_in_smm) {
+			msr_info->data |= ARCH_LBR_CTL_LBREN;
+			msr_info->data |= ARCH_LBR_IN_SMM;
+		}
 		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
@@ -501,11 +507,16 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!arch_lbr_ctl_is_valid(vcpu, data))
 			break;
 
-		vmcs_write64(GUEST_IA32_LBR_CTL, data);
-
 		if (intel_pmu_lbr_is_enabled(vcpu) && !lbr_desc->event &&
 		    (data & ARCH_LBR_CTL_LBREN))
 			intel_pmu_create_guest_lbr_event(vcpu);
+
+		if (data & ARCH_LBR_IN_SMM) {
+			data &= ~ARCH_LBR_CTL_LBREN;
+			data &= ~ARCH_LBR_IN_SMM;
+			to_vmx(vcpu)->lbr_in_smm = true;
+		}
+		vmcs_write64(GUEST_IA32_LBR_CTL, data);
 		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6d6ee9cf82f5..eadad24a68e6 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4543,6 +4543,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	vmx->rmode.vm86_active = 0;
 	vmx->spec_ctrl = 0;
+	vmx->lbr_in_smm = false;
 
 	vmx->msr_ia32_umwait_control = 0;
@@ -4593,6 +4594,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	if (!init_event) {
 		if (static_cpu_has(X86_FEATURE_ARCH_LBR))
 			vmcs_write64(GUEST_IA32_LBR_CTL, 0);
+	} else {
+		flip_arch_lbr_ctl(vcpu, false);
 	}
 }
@@ -7695,6 +7698,8 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 
 static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
+	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
@@ -7704,12 +7709,21 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 	vmx->nested.smm.vmxon = vmx->nested.vmxon;
 	vmx->nested.vmxon = false;
 	vmx_clear_hlt(vcpu);
+
+	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
+	    test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use) &&
+	    lbr_desc->event)
+		vmx->lbr_in_smm = true;
+
 	return 0;
 }
 
 static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
+	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
 	int ret;
 
 	if (vmx->nested.smm.vmxon) {
@@ -7725,6 +7739,16 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 		vmx->nested.nested_run_pending = 1;
 		vmx->nested.smm.guest_mode = false;
 	}
+
+	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
+	    test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use) &&
+	    lbr_desc->event && vmx->lbr_in_smm) {
+		u64 ctl = vmcs_read64(GUEST_IA32_LBR_CTL);
+
+		vmcs_write64(GUEST_IA32_LBR_CTL, ctl | ARCH_LBR_CTL_LBREN);
+		vmx->lbr_in_smm = false;
+	}
+
 	return 0;
 }
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index b98c7e96697a..a227fe8bf288 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -351,6 +351,7 @@ struct vcpu_vmx {
 
 	struct pt_desc pt_desc;
 	struct lbr_desc lbr_desc;
+	bool lbr_in_smm;
 
 	/* Save desired MSR intercept (read: pass-through) state */
 #define MAX_POSSIBLE_PASSTHROUGH_MSRS	15
--
2.27.0
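
For reference, the round trip this is meant to survive is just the normal
MSR save/restore a VMM does during live migration, roughly like the sketch
below. Error handling is omitted and the vcpu fds are whatever the VMM
already holds; only the KVM_GET_MSRS/KVM_SET_MSRS ioctls, struct kvm_msrs
and the 0x14ce index (MSR_ARCH_LBR_CTL in msr-index.h) are real API here,
the rest is illustrative.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_ARCH_LBR_CTL 0x000014ce	/* IA32_LBR_CTL */

/* One-entry wrapper around struct kvm_msrs' flexible array member. */
struct one_msr {
	struct kvm_msrs hdr;
	struct kvm_msr_entry entry;
};

/*
 * Source side: while the guest sits in SMM, intel_pmu_get_msr() folds the
 * hidden LBREn plus the bit-63 marker into the reported value.
 */
static uint64_t save_lbr_ctl(int src_vcpu_fd)
{
	struct one_msr m;

	memset(&m, 0, sizeof(m));
	m.hdr.nmsrs = 1;
	m.entry.index = MSR_ARCH_LBR_CTL;
	ioctl(src_vcpu_fd, KVM_GET_MSRS, &m);
	return m.entry.data;
}

/*
 * Destination side: intel_pmu_set_msr() recreates the LBR event, strips
 * the marker, re-hides LBREn and re-arms vmx->lbr_in_smm so that the
 * eventual RSM restores LBREn.
 */
static void restore_lbr_ctl(int dst_vcpu_fd, uint64_t data)
{
	struct one_msr m;

	memset(&m, 0, sizeof(m));
	m.hdr.nmsrs = 1;
	m.entry.index = MSR_ARCH_LBR_CTL;
	m.entry.data = data;
	ioctl(dst_vcpu_fd, KVM_SET_MSRS, &m);
}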
>
> Paolo