Message-Id: <20190702065955.165738-7-irogers@google.com>
Date: Mon, 1 Jul 2019 23:59:54 -0700
From: Ian Rogers <irogers@...gle.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...hat.com>,
Namhyung Kim <namhyung@...nel.org>,
linux-kernel@...r.kernel.org
Cc: Kan Liang <kan.liang@...ux.intel.com>,
Stephane Eranian <eranian@...gle.com>,
Ian Rogers <irogers@...gle.com>
Subject: [PATCH 6/7] perf: avoid double checking CPU and cgroup

When ctx_groups_sched_in iterates events, their CPU and cgroup are
already known to match the current task. Avoid checking this a second
time in event_filter_match by passing in an additional argument.

Signed-off-by: Ian Rogers <irogers@...gle.com>
---
kernel/events/core.c | 27 ++++++++++++++++++---------
1 file changed, 18 insertions(+), 9 deletions(-)

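For illustration, a minimal stand-alone sketch of the idea for readers
outside kernel/events. The struct, the cur_cpu variable and the helper
name are hypothetical stand-ins (the real code uses struct perf_event
and smp_processor_id()); the sketch only mirrors the shape of the
reworked event_filter_match, where a caller that has already
established the CPU and cgroup match passes false and pays only for
the PMU filter check.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct perf_event. */
struct event {
	int cpu;		/* -1 means "any CPU" */
	bool cgroup_match;	/* stands in for perf_cgroup_match() */
	bool pmu_match;		/* stands in for pmu_filter_match() */
};

static int cur_cpu;		/* stands in for smp_processor_id() */

/* Shape of the reworked filter: skip the CPU and cgroup checks when
 * the caller has already performed them. */
static bool filter_match(const struct event *e, bool check_cpu_and_cgroup)
{
	return (!check_cpu_and_cgroup ||
		((e->cpu == -1 || e->cpu == cur_cpu) && e->cgroup_match)) &&
	       e->pmu_match;
}

int main(void)
{
	struct event e = { .cpu = 3, .cgroup_match = true, .pmu_match = true };

	cur_cpu = 3;
	/* Generic path (e.g. __perf_event_enable): check everything. */
	printf("generic:  %d\n", filter_match(&e, true));
	/* Group scheduling path: the iterator already matched CPU and
	 * cgroup, so only the PMU filter is evaluated. */
	printf("sched-in: %d\n", filter_match(&e, false));
	return 0;
}
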
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7608bd562dac..a66477ee196a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2079,10 +2079,12 @@ static inline int pmu_filter_match(struct perf_event *event)
}

static inline int
-event_filter_match(struct perf_event *event)
+event_filter_match(struct perf_event *event, bool check_cpu_and_cgroup)
{
- return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
- perf_cgroup_match(event) && pmu_filter_match(event);
+ return (!check_cpu_and_cgroup ||
+ ((event->cpu == -1 || event->cpu == smp_processor_id()) &&
+ perf_cgroup_match(event))) &&
+ pmu_filter_match(event);
}

static void
@@ -2797,7 +2799,7 @@ static void __perf_event_enable(struct perf_event *event,
if (!ctx->is_active)
return;

- if (!event_filter_match(event)) {
+ if (!event_filter_match(event, /*check_cpu_and_cgroup=*/true)) {
ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
return;
}
@@ -3573,7 +3575,10 @@ static int pinned_sched_in(struct perf_event_context *ctx,
if (event->state <= PERF_EVENT_STATE_OFF)
return 0;

- if (!event_filter_match(event))
+ /* The caller already checked the CPU and cgroup before calling
+ * pinned_sched_in.
+ */
+ if (!event_filter_match(event, /*check_cpu_and_cgroup=*/false))
return 0;

if (group_can_go_on(event, cpuctx, 1)) {
@@ -3599,7 +3604,10 @@ static int flexible_sched_in(struct perf_event_context *ctx,
if (event->state <= PERF_EVENT_STATE_OFF)
return 0;

- if (!event_filter_match(event))
+ /* The caller already checked the CPU and cgroup before calling
+ * flexible_sched_in.
+ */
+ if (!event_filter_match(event, /*check_cpu_and_cgroup=*/false))
return 0;

if (group_can_go_on(event, cpuctx, *can_add_hw)) {
@@ -3899,7 +3907,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;

- if (!event_filter_match(event))
+ if (!event_filter_match(event, /*check_cpu_and_cgroup=*/true))
continue;

perf_pmu_disable(event->pmu);
@@ -6929,7 +6937,8 @@ perf_iterate_ctx(struct perf_event_context *ctx,
if (!all) {
if (event->state < PERF_EVENT_STATE_INACTIVE)
continue;
- if (!event_filter_match(event))
+ if (!event_filter_match(event,
+ /*check_cpu_and_cgroup=*/true))
continue;
}

@@ -6953,7 +6962,7 @@ static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)

if (event->state < PERF_EVENT_STATE_INACTIVE)
continue;
- if (!event_filter_match(event))
+ if (!event_filter_match(event, /*check_cpu_and_cgroup=*/true))
continue;
output(event, data);
}
--
2.22.0.410.gd8fdbe21b5-goog