Message-Id: <20240126085444.324918-13-xiong.y.zhang@linux.intel.com>
Date: Fri, 26 Jan 2024 16:54:15 +0800
From: Xiong Zhang <xiong.y.zhang@...ux.intel.com>
To: seanjc@...gle.com,
pbonzini@...hat.com,
peterz@...radead.org,
mizhang@...gle.com,
kan.liang@...el.com,
zhenyuw@...ux.intel.com,
dapeng1.mi@...ux.intel.com,
jmattson@...gle.com
Cc: kvm@...r.kernel.org,
linux-perf-users@...r.kernel.org,
linux-kernel@...r.kernel.org,
zhiyuan.lv@...el.com,
eranian@...gle.com,
irogers@...gle.com,
samantha.alt@...el.com,
like.xu.linux@...il.com,
chao.gao@...el.com,
xiong.y.zhang@...ux.intel.com,
Xiong Zhang <xiong.y.zhang@...el.com>
Subject: [RFC PATCH 12/41] KVM: x86/pmu: Plumb through passthrough PMU to vcpu for Intel CPUs
From: Mingwei Zhang <mizhang@...gle.com>
Plumb the passthrough PMU setting from kvm->arch into kvm_pmu on each
vCPU created. Note that enabling the PMU is decided by the VMM when it
sets the CPUID bits exposed to the guest VM, so plumb the enablement
through for each PMU in intel_pmu_refresh().
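
As a sketch only (this helper is not part of the patch and its name is
illustrative), later code could query the per-vCPU flag set here via the
existing vcpu_to_pmu() accessor:

	/* Hypothetical helper, not in this patch: gate
	 * passthrough-specific paths on the per-vCPU flag that
	 * intel_pmu_refresh() populates below.
	 */
	static inline bool kvm_pmu_is_passthrough(struct kvm_vcpu *vcpu)
	{
		return vcpu_to_pmu(vcpu)->passthrough;
	}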
Co-developed-by: Xiong Zhang <xiong.y.zhang@...el.com>
Signed-off-by: Xiong Zhang <xiong.y.zhang@...el.com>
Signed-off-by: Mingwei Zhang <mizhang@...gle.com>
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/pmu.c              |  1 +
 arch/x86/kvm/vmx/pmu_intel.c    | 10 ++++++++--
 3 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f2e73e6830a3..ede45c923089 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -575,6 +575,8 @@ struct kvm_pmu {
 	 * redundant check before cleanup if guest don't use vPMU at all.
 	 */
 	u8 event_count;
+
+	bool passthrough;
 };

 struct kvm_pmu_ops;
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 9ae07db6f0f6..1853739a59bf 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -665,6 +665,7 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
 	static_call(kvm_x86_pmu_init)(vcpu);
 	pmu->event_count = 0;
 	pmu->need_cleanup = false;
+	pmu->passthrough = false;
 	kvm_pmu_refresh(vcpu);
 }
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 820d3e1f6b4f..15cc107ed573 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -517,14 +517,20 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 		return;

 	entry = kvm_find_cpuid_entry(vcpu, 0xa);
-	if (!entry || !vcpu->kvm->arch.enable_pmu)
+	if (!entry || !vcpu->kvm->arch.enable_pmu) {
+		pmu->passthrough = false;
 		return;
+	}

 	eax.full = entry->eax;
 	edx.full = entry->edx;

 	pmu->version = eax.split.version_id;
-	if (!pmu->version)
+	if (!pmu->version) {
+		pmu->passthrough = false;
 		return;
+	}
+
+	pmu->passthrough = vcpu->kvm->arch.enable_passthrough_pmu;

 	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
 					 kvm_pmu_cap.num_counters_gp);
--
2.34.1