Message-Id: <1479894292-16277-4-git-send-email-kan.liang@intel.com>
Date: Wed, 23 Nov 2016 04:44:41 -0500
From: kan.liang@...el.com
To: peterz@...radead.org, mingo@...hat.com, acme@...nel.org,
linux-kernel@...r.kernel.org
Cc: alexander.shishkin@...ux.intel.com, tglx@...utronix.de,
namhyung@...nel.org, jolsa@...nel.org, adrian.hunter@...el.com,
wangnan0@...wei.com, mark.rutland@....com, andi@...stfloor.org,
Kan Liang <kan.liang@...el.com>
Subject: [PATCH 03/14] perf/x86: output multiplexing overhead
From: Kan Liang <kan.liang@...el.com>

Multiplexing overhead is one of the key sources of overhead when more
events are scheduled than there are available hardware counters.

Measure the time spent rotating contexts in perf_rotate_context(),
accumulate it in the per-CPU context, and output the accumulated total
as PERF_MUX_OVERHEAD when the event is scheduled out.

Signed-off-by: Kan Liang <kan.liang@...el.com>
---
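A note for reviewers (not meant for the commit log): the record itself
is deferred on purpose. perf_rotate_context() runs from the hrtimer
path, so it only pays for two perf_clock() reads and two additions;
the accumulated cpuctx->mux_overhead entry is flushed from
event_sched_out(), and only when log_overhead is set and at least one
rotation has been counted. A standalone sketch of this
accumulate-then-flush pattern follows the diffstat below.
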
 include/linux/perf_event.h      |  2 ++
 include/uapi/linux/perf_event.h |  1 +
 kernel/events/core.c            | 16 ++++++++++++++++
 3 files changed, 19 insertions(+)
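
The sketch below is a user-space approximation, not kernel code: the
local overhead_entry mirrors the nr/time/cpu fields of
perf_overhead_entry (introduced earlier in this series), and clock_ns()
stands in for perf_clock(). It shows the same accumulate-per-rotation,
flush-once-at-sched-out shape the patch adds.

/*
 * Standalone sketch of the accounting pattern: accumulate a count and
 * a total time on every "rotation", flush the entry once at the end.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct overhead_entry {
	uint32_t cpu;
	uint64_t nr;	/* rotations observed since the last flush */
	uint64_t time;	/* total ns spent in those rotations */
};

/* Stand-in for perf_clock(): monotonic nanoseconds. */
static uint64_t clock_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/* Mirrors perf_calculate_mux_overhead(): accumulate, don't log. */
static void calculate_mux_overhead(struct overhead_entry *e, uint64_t delta)
{
	e->nr++;
	e->time += delta;
}

int main(void)
{
	struct overhead_entry mux = { 0 };

	for (int i = 0; i < 3; i++) {
		uint64_t start = clock_ns();
		/* ... the rotate_ctx() work would happen here ... */
		calculate_mux_overhead(&mux, clock_ns() - start);
	}

	/*
	 * "Sched out": flush the accumulated entry once, the way the
	 * patch calls perf_log_overhead() from event_sched_out().
	 */
	printf("mux overhead: nr=%" PRIu64 " time=%" PRIu64 " ns\n",
	       mux.nr, mux.time);
	return 0;
}
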
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 632647f..f72b97a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -793,6 +793,8 @@ struct perf_cpu_context {
 	struct list_head		sched_cb_entry;
 	int				sched_cb_usage;
+
+	struct perf_overhead_entry	mux_overhead;
 };
 
 struct perf_output_handle {
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 071323d..9124c7c 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -993,6 +993,7 @@ struct perf_branch_entry {
 enum perf_record_overhead_type {
 	PERF_NMI_OVERHEAD = 0,
+	PERF_MUX_OVERHEAD,
 	PERF_OVERHEAD_MAX,
 };
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d82e6ca..9934059 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1829,6 +1829,11 @@ event_sched_out(struct perf_event *event,
 	if (event->attr.exclusive || !cpuctx->active_oncpu)
 		cpuctx->exclusive = 0;
 
+	if (log_overhead && cpuctx->mux_overhead.nr) {
+		cpuctx->mux_overhead.cpu = smp_processor_id();
+		perf_log_overhead(event, PERF_MUX_OVERHEAD, &cpuctx->mux_overhead);
+	}
+
 	perf_pmu_enable(event->pmu);
 }
@@ -3330,9 +3335,17 @@ static void rotate_ctx(struct perf_event_context *ctx)
 	list_rotate_left(&ctx->flexible_groups);
 }
 
+static void
+perf_calculate_mux_overhead(struct perf_cpu_context *cpuctx, u64 time)
+{
+	cpuctx->mux_overhead.nr++;
+	cpuctx->mux_overhead.time += time;
+}
+
 static int perf_rotate_context(struct perf_cpu_context *cpuctx)
 {
 	struct perf_event_context *ctx = NULL;
+	u64 start_clock, end_clock;
 	int rotate = 0;
 
 	if (cpuctx->ctx.nr_events) {
@@ -3349,6 +3362,7 @@ static int perf_rotate_context(struct perf_cpu_context *cpuctx)
 	if (!rotate)
 		goto done;
 
+	start_clock = perf_clock();
 	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 	perf_pmu_disable(cpuctx->ctx.pmu);
@@ -3364,6 +3378,8 @@ static int perf_rotate_context(struct perf_cpu_context *cpuctx)
 	perf_pmu_enable(cpuctx->ctx.pmu);
 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
+	end_clock = perf_clock();
+	perf_calculate_mux_overhead(cpuctx, end_clock - start_clock);
 done:
 	return rotate;
--
2.5.5