Message-Id: <20190318214144.4639-6-kan.liang@linux.intel.com>
Date: Mon, 18 Mar 2019 14:41:27 -0700
From: kan.liang@...ux.intel.com
To: peterz@...radead.org, acme@...nel.org, mingo@...hat.com,
linux-kernel@...r.kernel.org
Cc: tglx@...utronix.de, jolsa@...nel.org, eranian@...gle.com,
alexander.shishkin@...ux.intel.com, ak@...ux.intel.com,
Kan Liang <kan.liang@...ux.intel.com>
Subject: [PATCH 05/22] perf/x86: Support constraint ranges
From: Andi Kleen <ak@...ux.intel.com>
Icelake extended the general-purpose counters to 8, even when SMT is
enabled. However, only a (large) subset of the events can be used on
all 8 counters.

The events that can or cannot be used on all counters are organized in
ranges of event codes, so handling all of them would require a lot of
scheduler constraints.

To avoid blowing up the tables, add event code ranges to the constraint
tables, and a new inline function to match them.

The changes cost ~2k of text size according to the 0day report.
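
As an illustration (the event codes and counter mask here are made up,
not taken from the actual Icelake tables), a single range entry can then
replace what would otherwise be one constraint per event code:

	/* events 0xd0-0xdf restricted to counters 0-3, as one entry */
	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),

constraint_match() treats any event code within [code, range_end] (after
masking with cmask) as a match; entries with range_end == 0 keep the old
exact-match behaviour.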
Signed-off-by: Andi Kleen <ak@...ux.intel.com>
Signed-off-by: Kan Liang <kan.liang@...ux.intel.com>
---
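
Note for reviewers, not part of the patch: a minimal, self-contained
user-space sketch of the new matching semantics, using a made-up range
entry and assuming cmask is the event-code byte
(ARCH_PERFMON_EVENTSEL_EVENT, i.e. 0xff):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* simplified stand-in for struct event_constraint */
	struct sketch_constraint {
		uint64_t code;		/* first event code in the range */
		uint64_t range_end;	/* last event code, or 0 for exact match */
		uint64_t cmask;		/* which config bits to compare */
	};

	/* mirrors the constraint_match() added in perf_event.h below */
	static bool sketch_match(const struct sketch_constraint *c, uint64_t ecode)
	{
		ecode &= c->cmask;
		return ecode == c->code ||
		       (c->range_end && ecode >= c->code && ecode <= c->range_end);
	}

	int main(void)
	{
		/* made-up range 0xd0-0xdf, matched on the event-code byte */
		struct sketch_constraint c = {
			.code = 0xd0, .range_end = 0xdf, .cmask = 0xff,
		};

		printf("%d\n", sketch_match(&c, 0x01d3));	/* 1: 0xd3 is in the range */
		printf("%d\n", sketch_match(&c, 0x01e0));	/* 0: 0xe0 is outside */
		return 0;
	}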
 arch/x86/events/intel/core.c |  2 +-
 arch/x86/events/intel/ds.c   |  2 +-
 arch/x86/events/perf_event.h | 34 ++++++++++++++++++++++++++++++++++
 3 files changed, 36 insertions(+), 2 deletions(-)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a964b9832b0c..8486ab87f8f8 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2655,7 +2655,7 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 	if (x86_pmu.event_constraints) {
 		for_each_event_constraint(c, x86_pmu.event_constraints) {
-			if ((event->hw.config & c->cmask) == c->code) {
+			if (constraint_match(c, event->hw.config)) {
 				event->hw.flags |= c->flags;
 				return c;
 			}
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 974284c5ed6c..30370fb93e21 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -858,7 +858,7 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
 	if (x86_pmu.pebs_constraints) {
 		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
-			if ((event->hw.config & c->cmask) == c->code) {
+			if (constraint_match(c, event->hw.config)) {
 				event->hw.flags |= c->flags;
 				return c;
 			}
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 27c7945b5174..863d27f4c352 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -54,6 +54,7 @@ struct event_constraint {
 	int	weight;
 	int	overlap;
 	int	flags;
+	u64	range_end;
 };
 /*
  * struct hw_perf_event.flags flags
@@ -71,6 +72,12 @@ struct event_constraint {
 #define PERF_X86_EVENT_AUTO_RELOAD	0x0400 /* use PEBS auto-reload */
 #define PERF_X86_EVENT_LARGE_PEBS	0x0800 /* use large PEBS */
+static inline bool constraint_match(struct event_constraint *c, u64 ecode)
+{
+	ecode &= c->cmask;
+	return ecode == c->code ||
+	       (c->range_end && ecode >= c->code && ecode <= c->range_end);
+}
 struct amd_nb {
 	int nb_id;  /* NorthBridge id */
@@ -267,9 +274,22 @@ struct cpu_hw_events {
 	.flags = f,			\
 }
+#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
+	{ .idxmsk64 = (n) },		\
+	.code = (c),			\
+	.range_end = (e),		\
+	.cmask = (m),			\
+	.weight = (w),			\
+	.overlap = (o),			\
+	.flags = f,			\
+}
+
 #define EVENT_CONSTRAINT(c, n, m)	\
 	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
+#define EVENT_CONSTRAINT_RANGE(c, e, n, m)			\
+	__EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)
+
 #define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
 	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
 			   0, PERF_X86_EVENT_EXCL)
@@ -304,6 +324,12 @@ struct cpu_hw_events {
 #define INTEL_EVENT_CONSTRAINT(c, n)	\
 	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
+/*
+ * Constraint on a range of Event codes
+ */
+#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)			\
+	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)
+
 /*
  * Constraint on the Event code + UMask + fixed-mask
  *
@@ -351,6 +377,9 @@ struct cpu_hw_events {
 #define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
+#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)			\
+	EVENT_CONSTRAINT_RANGE(c, e, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
+
 /* Check only flags, but allow all event/umask */
 #define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
 	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)
@@ -367,6 +396,11 @@ struct cpu_hw_events {
 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
+	__EVENT_CONSTRAINT_RANGE(code, end, n,			\
+			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
+			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+
 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
 	__EVENT_CONSTRAINT(code, n,			\
 			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
--
2.17.1