Message-Id: <20240126085444.324918-11-xiong.y.zhang@linux.intel.com>
Date: Fri, 26 Jan 2024 16:54:13 +0800
From: Xiong Zhang <xiong.y.zhang@...ux.intel.com>
To: seanjc@...gle.com,
pbonzini@...hat.com,
peterz@...radead.org,
mizhang@...gle.com,
kan.liang@...el.com,
zhenyuw@...ux.intel.com,
dapeng1.mi@...ux.intel.com,
jmattson@...gle.com
Cc: kvm@...r.kernel.org,
linux-perf-users@...r.kernel.org,
linux-kernel@...r.kernel.org,
zhiyuan.lv@...el.com,
eranian@...gle.com,
irogers@...gle.com,
samantha.alt@...el.com,
like.xu.linux@...il.com,
chao.gao@...el.com,
xiong.y.zhang@...ux.intel.com
Subject: [RFC PATCH 10/41] perf: core/x86: Plumb passthrough PMU capability from x86_pmu to x86_pmu_cap
From: Mingwei Zhang <mizhang@...gle.com>
Plumb the passthrough PMU capability through to x86_pmu_cap so that any
kernel entity, such as KVM, can tell that the host PMU supports passthrough
PMU mode and has the implementation in place.
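For illustration, a consumer such as KVM could query the new bit via the
exported helper (a minimal sketch; the surrounding caller is hypothetical,
the helper and struct field are the ones touched by this patch):

	struct x86_pmu_capability cap;

	/* Ask the host PMU driver for its capabilities. */
	perf_get_x86_pmu_capability(&cap);

	if (cap.passthrough) {
		/* Host PMU supports passthrough mode; enable it for guests. */
	}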
Signed-off-by: Mingwei Zhang <mizhang@...gle.com>
---
arch/x86/events/core.c | 1 +
arch/x86/events/intel/core.c | 4 +++-
arch/x86/events/perf_event.h | 1 +
arch/x86/include/asm/perf_event.h | 1 +
4 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 20a5ccc641b9..d2b7aa5b7876 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -3026,6 +3026,7 @@ void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
cap->events_mask = (unsigned int)x86_pmu.events_maskl;
cap->events_mask_len = x86_pmu.events_mask_len;
cap->pebs_ept = x86_pmu.pebs_ept;
+ cap->passthrough = !!(x86_pmu.flags & PMU_FL_PASSTHROUGH);
}
EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index cf790c37757a..727ee64bb566 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -6140,8 +6140,10 @@ __init int intel_pmu_init(void)
pr_cont(" AnyThread deprecated, ");
}
- if (version >= 4)
+ if (version >= 4) {
+ x86_pmu.flags |= PMU_FL_PASSTHROUGH;
x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_VPMU_PASSTHROUGH;
+ }
/*
* Install the hw-cache-events table:
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 53dd5d495ba6..39c58a3f5a6b 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1012,6 +1012,7 @@ do { \
#define PMU_FL_INSTR_LATENCY 0x80 /* Support Instruction Latency in PEBS Memory Info Record */
#define PMU_FL_MEM_LOADS_AUX 0x100 /* Require an auxiliary event for the complete memory info */
#define PMU_FL_RETIRE_LATENCY 0x200 /* Support Retire Latency in PEBS */
+#define PMU_FL_PASSTHROUGH 0x400 /* Support passthrough mode */
#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 180d63ba2f46..400727b27634 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -254,6 +254,7 @@ struct x86_pmu_capability {
unsigned int events_mask;
int events_mask_len;
unsigned int pebs_ept :1;
+ unsigned int passthrough :1;
};
/*
--
2.34.1