Message-Id: <20190702065955.165738-8-irogers@google.com>
Date: Mon, 1 Jul 2019 23:59:55 -0700
From: Ian Rogers <irogers@...gle.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...hat.com>,
Namhyung Kim <namhyung@...nel.org>,
linux-kernel@...r.kernel.org
Cc: Kan Liang <kan.liang@...ux.intel.com>,
Stephane Eranian <eranian@...gle.com>,
Ian Rogers <irogers@...gle.com>
Subject: [PATCH 7/7] perf: rename visit_groups_merge to ctx_groups_sched_in
The visit_groups_merge function no longer takes a function pointer, so
rename it to match the other *_sched_in functions.

Following Kan Liang's <kan.liang@...ux.intel.com> suggestion, also remove
the single-caller wrappers ctx_pinned_sched_in and ctx_flexible_sched_in,
moving their functionality into the caller.
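
For context, a minimal self-contained sketch of the same refactoring
pattern (drop single-caller wrappers, call the shared helper directly
from the caller). The names groups_sched_in and sched_in below are
hypothetical illustrations only, not the kernel functions; the actual
change is in the diff that follows.

#include <stdbool.h>
#include <stdio.h>

/* Shared helper, analogous in shape to ctx_groups_sched_in(). */
static int groups_sched_in(bool is_pinned, int *can_add_hw)
{
	printf("is_pinned=%d can_add_hw=%s\n",
	       is_pinned, can_add_hw ? "tracked" : "n/a");
	return 0;
}

/* Caller after the refactor: no pinned/flexible wrappers remain. */
static void sched_in(bool pinned, bool flexible)
{
	if (pinned)
		groups_sched_in(/*is_pinned=*/true, NULL);

	if (flexible) {
		int can_add_hw = 1;

		groups_sched_in(/*is_pinned=*/false, &can_add_hw);
	}
}

int main(void)
{
	sched_in(true, true);
	return 0;
}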
Signed-off-by: Ian Rogers <irogers@...gle.com>
---
include/linux/perf_event.h | 4 +-
kernel/events/core.c | 77 ++++++++++++++++----------------------
2 files changed, 35 insertions(+), 46 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 86fb379296cb..1dd0250d72bf 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -802,8 +802,8 @@ struct perf_cpu_context {
#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup *cgrp;
struct list_head cgrp_cpuctx_entry;
- struct perf_event **visit_groups_merge_iterator_storage;
- int visit_groups_merge_iterator_storage_size;
+ struct perf_event **ctx_groups_sched_in_iterator_storage;
+ int ctx_groups_sched_in_iterator_storage_size;
#endif
struct list_head sched_cb_entry;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a66477ee196a..e714c2f9ea0d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2642,22 +2642,22 @@ static int __perf_install_in_context(void *info)
event->cgrp->css.cgroup);
/*
- * Ensure space for visit_groups_merge iterator storage. With
+ * Ensure space for ctx_groups_sched_in iterator storage. With
* cgroup profiling we may have an event at each depth plus
* system wide events.
*/
max_iterators = perf_event_cgroup_depth(event) + 1;
if (max_iterators >
- cpuctx->visit_groups_merge_iterator_storage_size) {
+ cpuctx->ctx_groups_sched_in_iterator_storage_size) {
struct perf_event **storage =
- krealloc(cpuctx->visit_groups_merge_iterator_storage,
+ krealloc(cpuctx->ctx_groups_sched_in_iterator_storage,
sizeof(struct perf_event *) * max_iterators,
GFP_KERNEL);
if (storage) {
- cpuctx->visit_groups_merge_iterator_storage
- = storage;
- cpuctx->visit_groups_merge_iterator_storage_size
- = max_iterators;
+ cpuctx->ctx_groups_sched_in_iterator_storage
+ = storage;
+ cpuctx->ctx_groups_sched_in_iterator_storage_size
+ = max_iterators;
} else {
WARN_ONCE(1, "Unable to increase iterator "
"storage for perf events with cgroups");
@@ -3466,32 +3466,33 @@ static int flexible_sched_in(struct perf_event_context *ctx,
* Without cgroups, with a task context, there may be per-CPU and any
* CPU events.
*/
-#define MIN_VISIT_GROUP_MERGE_ITERATORS 2
+#define MIN_CTX_GROUPS_SCHED_IN_ITERATORS 2
-static int visit_groups_merge(struct perf_event_context *ctx,
- struct perf_cpu_context *cpuctx,
- bool is_pinned,
- int *data)
+static int ctx_groups_sched_in(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx,
+ bool is_pinned,
+ int *data)
{
/*
* A set of iterators, the iterator for the visit is chosen by the
* group_index.
*/
#ifndef CONFIG_CGROUP_PERF
- struct perf_event *itrs[MIN_VISIT_GROUP_MERGE_ITERATORS];
+ struct perf_event *itrs[MIN_CTX_GROUPS_SCHED_IN_ITERATORS];
struct perf_event_heap heap = {
.storage = itrs,
.num_elements = 0,
- .max_elements = MIN_VISIT_GROUP_MERGE_ITERATORS
+ .max_elements = MIN_CTX_GROUPS_SCHED_IN_ITERATORS
};
#else
/*
* With cgroups usage space in the CPU context reserved for iterators.
*/
struct perf_event_heap heap = {
- .storage = cpuctx->visit_groups_merge_iterator_storage,
+ .storage = cpuctx->ctx_groups_sched_in_iterator_storage,
.num_elements = 0,
- .max_elements = cpuctx->visit_groups_merge_iterator_storage_size
+ .max_elements =
+ cpuctx->ctx_groups_sched_in_iterator_storage_size
};
#endif
int ret, cpu = smp_processor_id();
@@ -3623,27 +3624,6 @@ static int flexible_sched_in(struct perf_event_context *ctx,
return 0;
}
-static void
-ctx_pinned_sched_in(struct perf_event_context *ctx,
- struct perf_cpu_context *cpuctx)
-{
- visit_groups_merge(ctx,
- cpuctx,
- /*is_pinned=*/true,
- NULL);
-}
-
-static void
-ctx_flexible_sched_in(struct perf_event_context *ctx,
- struct perf_cpu_context *cpuctx)
-{
- int can_add_hw = 1;
-
- visit_groups_merge(ctx,
- cpuctx,
- /*is_pinned=*/false,
- &can_add_hw);
-}
static void
ctx_sched_in(struct perf_event_context *ctx,
@@ -3681,11 +3661,20 @@ ctx_sched_in(struct perf_event_context *ctx,
* in order to give them the best chance of going on.
*/
if (is_active & EVENT_PINNED)
- ctx_pinned_sched_in(ctx, cpuctx);
+ ctx_groups_sched_in(ctx,
+ cpuctx,
+ /*is_pinned=*/true,
+ NULL);
/* Then walk through the lower prio flexible groups */
- if (is_active & EVENT_FLEXIBLE)
- ctx_flexible_sched_in(ctx, cpuctx);
+ if (is_active & EVENT_FLEXIBLE) {
+ int can_add_hw = 1;
+
+ ctx_groups_sched_in(ctx,
+ cpuctx,
+ /*is_pinned=*/false,
+ &can_add_hw);
+ }
}
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
@@ -10243,12 +10232,12 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
cpuctx->ctx.pmu = pmu;
cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
#ifdef CONFIG_CGROUP_PERF
- cpuctx->visit_groups_merge_iterator_storage =
- kmalloc_array(MIN_VISIT_GROUP_MERGE_ITERATORS,
+ cpuctx->ctx_groups_sched_in_iterator_storage =
+ kmalloc_array(MIN_CTX_GROUPS_SCHED_IN_ITERATORS,
sizeof(struct perf_event *),
GFP_KERNEL);
- cpuctx->visit_groups_merge_iterator_storage_size =
- MIN_VISIT_GROUP_MERGE_ITERATORS;
+ cpuctx->ctx_groups_sched_in_iterator_storage_size =
+ MIN_CTX_GROUPS_SCHED_IN_ITERATORS;
#endif
__perf_mux_hrtimer_init(cpuctx, cpu);
}
--
2.22.0.410.gd8fdbe21b5-goog