Message-Id: <20191114003042.85252-6-irogers@google.com>
Date: Wed, 13 Nov 2019 16:30:37 -0800
From: Ian Rogers <irogers@...gle.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...hat.com>,
Namhyung Kim <namhyung@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Masahiro Yamada <yamada.masahiro@...ionext.com>,
Kees Cook <keescook@...omium.org>,
Catalin Marinas <catalin.marinas@....com>,
Petr Mladek <pmladek@...e.com>,
Mauro Carvalho Chehab <mchehab+samsung@...nel.org>,
Qian Cai <cai@....pw>, Joe Lawrence <joe.lawrence@...hat.com>,
Tetsuo Handa <penguin-kernel@...ove.sakura.ne.jp>,
Sri Krishna chowdary <schowdary@...dia.com>,
"Uladzislau Rezki (Sony)" <urezki@...il.com>,
Andy Shevchenko <andriy.shevchenko@...ux.intel.com>,
Changbin Du <changbin.du@...el.com>,
Ard Biesheuvel <ardb@...nel.org>,
"David S. Miller" <davem@...emloft.net>,
Kent Overstreet <kent.overstreet@...il.com>,
Gary Hook <Gary.Hook@....com>, Arnd Bergmann <arnd@...db.de>,
Kan Liang <kan.liang@...ux.intel.com>,
linux-kernel@...r.kernel.org
Cc: Stephane Eranian <eranian@...gle.com>,
Andi Kleen <ak@...ux.intel.com>,
Ian Rogers <irogers@...gle.com>
Subject: [PATCH v3 05/10] perf/cgroup: Grow per perf_cpu_context heap storage

Allow the per-CPU min heap storage to have sufficient space for per-cgroup
iterators: one iterator for each level of cgroup nesting plus one iterator
for events with no cgroup. Storage is grown, never shrunk, when a cgroup
event is connected to a CPU context.

Based-on-work-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Signed-off-by: Ian Rogers <irogers@...gle.com>
---
kernel/events/core.c | 47 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 47 insertions(+)
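
Note: the core of the change is a grow-and-swap pattern: candidate storage
is allocated outside the lock (where blocking allocation is allowed),
published under ctx.lock only if it still exceeds the current capacity, and
whichever buffer lost is freed afterwards, taking care never to free the
embedded default storage. Below is a minimal userspace sketch of the same
pattern, with a pthread mutex standing in for raw_spin_lock_irq(); the
names heap_storage, ensure_storage_cap and inline_buf are invented for
illustration and are not kernel APIs.

/*
 * Userspace sketch of the grow-and-swap pattern used by
 * perf_cgroup_ensure_itr_storage_cap(). Illustrative names only.
 */
#include <pthread.h>
#include <stdlib.h>

struct heap_storage {
	pthread_mutex_t lock;	/* stands in for cpuctx->ctx.lock */
	void **data;		/* current iterator storage */
	int cap;		/* its capacity, in elements */
	void *inline_buf[2];	/* embedded default; must never be freed */
};

static int ensure_storage_cap(struct heap_storage *hs, int want)
{
	void **storage;

	if (want <= hs->cap)	/* racy fast path; rechecked under lock */
		return 0;

	/* Allocate outside the lock, where blocking is allowed. */
	storage = calloc(want, sizeof(*storage));
	if (!storage)
		return -1;

	pthread_mutex_lock(&hs->lock);
	if (hs->cap < want) {
		/*
		 * Still the winner: publish. Contents are not copied
		 * because the buffer is scratch space, only populated
		 * while the lock is held.
		 */
		void **old = hs->data;

		hs->data = storage;
		hs->cap = want;
		storage = old;	/* now holds the losing buffer */
	}
	pthread_mutex_unlock(&hs->lock);

	/* Free the loser, unless it is the embedded default storage. */
	if (storage != hs->inline_buf)
		free(storage);
	return 0;
}

Because the storage is only ever touched under ctx.lock, resizing cannot
race with a concurrent reader; the lockdep_assert_held() added to
visit_groups_merge() documents exactly that requirement.
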
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0dab60bf5935..3c44be7de44e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -892,6 +892,47 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
 	rcu_read_unlock();
 }
 
+static int perf_cgroup_ensure_itr_storage_cap(struct perf_event *event,
+					      struct cgroup_subsys_state *css)
+{
+	struct perf_cpu_context *cpuctx;
+	struct perf_event **storage;
+	int cpu, itr_cap, ret = 0;
+
+	/*
+	 * Allow storage to have sufficient space for an iterator for each
+	 * possibly nested cgroup plus an iterator for events with no cgroup.
+	 */
+	for (itr_cap = 1; css; css = css->parent)
+		itr_cap++;
+
+	for_each_possible_cpu(cpu) {
+		cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu);
+		if (itr_cap <= cpuctx->itr_storage_cap)
+			continue;
+
+		storage = kmalloc_node(itr_cap * sizeof(struct perf_event *),
+				       GFP_KERNEL, cpu_to_node(cpu));
+		if (!storage) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		raw_spin_lock_irq(&cpuctx->ctx.lock);
+		if (cpuctx->itr_storage_cap < itr_cap) {
+			swap(cpuctx->itr_storage, storage);
+			if (storage == cpuctx->itr_default)
+				storage = NULL;
+			cpuctx->itr_storage_cap = itr_cap;
+		}
+		raw_spin_unlock_irq(&cpuctx->ctx.lock);
+
+		kfree(storage);
+	}
+
+	return ret;
+}
+
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
 				      struct perf_event_attr *attr,
 				      struct perf_event *group_leader)
@@ -911,6 +952,10 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
 		goto out;
 	}
 
+	ret = perf_cgroup_ensure_itr_storage_cap(event, css);
+	if (ret)
+		goto out;
+
 	cgrp = container_of(css, struct perf_cgroup, css);
 	event->cgrp = cgrp;
 
@@ -3421,6 +3466,8 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
 			.size = 0,
 			.cap = cpuctx->itr_storage_cap,
 		};
+
+		lockdep_assert_held(&cpuctx->ctx.lock);
 	} else {
 		event_heap = (struct min_max_heap){
 			.data = itrs,
--
2.24.0.432.g9d3f5f5b63-goog