Message-Id: <20210512084446.342526-5-like.xu@linux.intel.com>
Date: Wed, 12 May 2021 16:44:45 +0800
From: Like Xu <like.xu@...ux.intel.com>
To: Paolo Bonzini <pbonzini@...hat.com>, peterz@...radead.org
Cc: Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>, weijiang.yang@...el.com,
eranian@...gle.com, wei.w.wang@...el.com, kvm@...r.kernel.org,
x86@...nel.org, linux-kernel@...r.kernel.org,
Like Xu <like.xu@...ux.intel.com>
Subject: [PATCH v3 4/5] KVM: x86/pmu: Add counter reload registers to the MSR-load list
The guest counter reload registers need to be loaded into real hardware
before VM-entry. Following the existing guest PT implementation, add
these counter reload registers to the MSR-load list when the
corresponding PEBS counters are enabled, so that the optimization in
clear_atomic_switch_msr() can be reused.

To support this, expand MAX_NR_LOADSTORE_MSRS from 8 to 16, since up to
7 or 8 counter reload registers may need to be added to the MSR-load
list when all counters are enabled.
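For reference, the MSR-load list is a fixed-size array bounded by
MAX_NR_LOADSTORE_MSRS, and add_atomic_switch_msr() refuses to grow past
it. Roughly (a simplified sketch, not the exact vmx.c code):

        /* Sketch of the capacity guard in add_atomic_switch_msr(). */
        if (m->guest.nr == MAX_NR_LOADSTORE_MSRS) {
                printk_once(KERN_WARNING
                            "Not enough msr switch entries. Can't add msr %x\n", msr);
                return;
        }

Without the bump, the extra reload registers would be dropped with only
this one-time warning.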
Cc: Peter Zijlstra <peterz@...radead.org>
Signed-off-by: Like Xu <like.xu@...ux.intel.com>
---
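Note, not part of the commit message: the arr[] entries built below are
consumed on the VMX side along the lines of the existing
atomic_switch_perf_msrs() path. This is a simplified sketch; with this
series, perf_guest_get_msrs() takes the kvm_pmu as its data cookie:

        static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
        {
                int i, nr_msrs;
                struct perf_guest_switch_msr *msrs;
                struct kvm_pmu *pmu = vcpu_to_pmu(&vmx->vcpu);

                msrs = perf_guest_get_msrs(&nr_msrs, (void *)pmu);
                if (!msrs)
                        return;

                for (i = 0; i < nr_msrs; i++)
                        if (msrs[i].host == msrs[i].guest)
                                /* Identical host/guest value: drop the entry. */
                                clear_atomic_switch_msr(vmx, msrs[i].msr);
                        else
                                add_atomic_switch_msr(vmx, msrs[i].msr,
                                                      msrs[i].guest,
                                                      msrs[i].host, false);
        }

So the reload registers ride the existing list and reuse the
clear_atomic_switch_msr() fast path; no new VM-entry plumbing is needed.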
arch/x86/events/intel/core.c | 27 +++++++++++++++++++++++++++
arch/x86/kvm/vmx/vmx.h | 2 +-
2 files changed, 28 insertions(+), 1 deletion(-)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 4404987bbc57..bd6d9e2a64d9 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3903,6 +3903,8 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
        u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
        u64 pebs_mask = (x86_pmu.flags & PMU_FL_PEBS_ALL) ?
                        cpuc->pebs_enabled : (cpuc->pebs_enabled & PEBS_COUNTER_MASK);
+       u64 guest_pebs_enable, base, idx, host_reload_ctr;
+       unsigned long bit;

        *nr = 0;
        arr[(*nr)++] = (struct perf_guest_switch_msr){
@@ -3964,7 +3966,32 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
                arr[0].guest |= arr[*nr].guest;
        }

+       guest_pebs_enable = arr[*nr].guest;
        ++(*nr);
+
+       if (!x86_pmu.intel_cap.pebs_output_pt_available ||
+           !(guest_pebs_enable & PEBS_OUTPUT_PT))
+               return arr;
+
+       for_each_set_bit(bit, (unsigned long *)&guest_pebs_enable,
+                        INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed) {
+               base = (bit < INTEL_PMC_IDX_FIXED) ?
+                      MSR_RELOAD_PMC0 : MSR_RELOAD_FIXED_CTR0;
+               idx = (bit < INTEL_PMC_IDX_FIXED) ?
+                     bit : (bit - INTEL_PMC_IDX_FIXED);
+
+               /* Valid as long as the PEBS counters are not cross-mapped. */
+               rdmsrl(base + idx, host_reload_ctr);
+
+               arr[(*nr)++] = (struct perf_guest_switch_msr){
+                       .msr = base + idx,
+                       .host = host_reload_ctr,
+                       .guest = (bit < INTEL_PMC_IDX_FIXED) ?
+                               pmu->gp_counters[bit].reload_counter :
+                               pmu->fixed_counters[idx].reload_counter,
+               };
+       }
+
        return arr;
 }
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 3afdcebb0a11..25aa1cc3cc6a 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -28,7 +28,7 @@ extern const u32 vmx_msr_index[];
 #define MAX_NR_USER_RETURN_MSRS 4
 #endif

-#define MAX_NR_LOADSTORE_MSRS 8
+#define MAX_NR_LOADSTORE_MSRS 16

 struct vmx_msrs {
        unsigned int nr;
--
2.31.1