Message-Id: <20191105140955.22504-1-like.xu@linux.intel.com>
Date:   Tue,  5 Nov 2019 22:09:55 +0800
From:   Like Xu <like.xu@...ux.intel.com>
To:     Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>,
        Arnaldo Carvalho de Melo <acme@...nel.org>
Cc:     Mark Rutland <mark.rutland@....com>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Jiri Olsa <jolsa@...hat.com>,
        Namhyung Kim <namhyung@...nel.org>,
        linux-kernel@...r.kernel.org, Paolo Bonzini <pbonzini@...hat.com>
Subject: [PATCH] perf/x86: Expose more Intel perf_capabilities to other modules

The KVM vPMU feature depends on the host's native perf implementation.
KVM now needs to know more about PMU capabilities, such as lbr, pebs and
full_width_write, to determine which features it can provide to guests.

Signed-off-by: Like Xu <like.xu@...ux.intel.com>
---
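For reference, a consumer such as KVM could then query the exposed
capabilities roughly as sketched below. This is only an illustrative
example and not part of this patch; kvm_update_vpmu_caps() is a
hypothetical helper name.

#include <linux/printk.h>
#include <asm/perf_event.h>

static void kvm_update_vpmu_caps(void)
{
	struct x86_pmu_capability cap;

	/* Fills in the new pmu_cap member added by this patch. */
	perf_get_x86_pmu_capability(&cap);

	if (cap.pmu_cap.full_width_write)
		pr_info("guest counters can use full-width writes\n");
	if (cap.pmu_cap.lbr_format)
		pr_info("LBR format %u usable for the guest\n",
			(unsigned int)cap.pmu_cap.lbr_format);
	if (cap.pmu_cap.pebs_format)
		pr_info("PEBS format %u usable for the guest\n",
			(unsigned int)cap.pmu_cap.pebs_format);
}
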
 arch/x86/events/core.c            |  1 +
 arch/x86/events/perf_event.h      | 19 -------------------
 arch/x86/include/asm/perf_event.h | 20 ++++++++++++++++++++
 3 files changed, 21 insertions(+), 19 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 7b21455d7504..fe8bb51cb277 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2587,5 +2587,6 @@ void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
 	cap->bit_width_fixed	= x86_pmu.cntval_bits;
 	cap->events_mask	= (unsigned int)x86_pmu.events_maskl;
 	cap->events_mask_len	= x86_pmu.events_mask_len;
+	cap->pmu_cap		= x86_pmu.intel_cap;
 }
 EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index ecacfbf4ebc1..ca1c94dd111f 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -505,25 +505,6 @@ struct extra_reg {
 
 #define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
 
-union perf_capabilities {
-	struct {
-		u64	lbr_format:6;
-		u64	pebs_trap:1;
-		u64	pebs_arch_reg:1;
-		u64	pebs_format:4;
-		u64	smm_freeze:1;
-		/*
-		 * PMU supports separate counter range for writing
-		 * values > 32bit.
-		 */
-		u64	full_width_write:1;
-		u64     pebs_baseline:1;
-		u64	pebs_metrics_available:1;
-		u64	pebs_output_pt_available:1;
-	};
-	u64	capabilities;
-};
-
 struct x86_pmu_quirk {
 	struct x86_pmu_quirk *next;
 	void (*func)(void);
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index ee26e9215f18..7983cc8e5a15 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -131,6 +131,25 @@ union cpuid10_edx {
 	unsigned int full;
 };
 
+union perf_capabilities {
+	struct {
+		u64	lbr_format:6;
+		u64	pebs_trap:1;
+		u64	pebs_arch_reg:1;
+		u64	pebs_format:4;
+		u64	smm_freeze:1;
+		/*
+		 * PMU supports separate counter range for writing
+		 * values > 32bit.
+		 */
+		u64	full_width_write:1;
+		u64     pebs_baseline:1;
+		u64	pebs_metrics_available:1;
+		u64	pebs_output_pt_available:1;
+	};
+	u64	capabilities;
+};
+
 struct x86_pmu_capability {
 	int		version;
 	int		num_counters_gp;
@@ -139,6 +158,7 @@ struct x86_pmu_capability {
 	int		bit_width_fixed;
 	unsigned int	events_mask;
 	int		events_mask_len;
+	union perf_capabilities pmu_cap;
 };
 
 /*
-- 
2.21.0
