Date:   Mon, 29 Apr 2019 07:44:02 -0700
From:   kan.liang@...ux.intel.com
To:     peterz@...radead.org, tglx@...utronix.de, mingo@...hat.com,
        linux-kernel@...r.kernel.org
Cc:     eranian@...gle.com, tj@...nel.org, ak@...ux.intel.com,
        Kan Liang <kan.liang@...ux.intel.com>
Subject: [PATCH 1/4] perf: Fix miscounting of system-wide events during cgroup monitoring

From: Kan Liang <kan.liang@...ux.intel.com>

When system-wide events and cgroup events are counted simultaneously,
the system-wide events are miscounted. For example,

    perf stat -e cycles,instructions -e cycles,instructions \
        -G cgroup1,cgroup1,cgroup2,cgroup2 -a -e cycles,instructions -I 1000

     1.096265502     12,375,398,872      cycles              cgroup1
     1.096265502      8,396,184,503      instructions        cgroup1  #    0.10  insn per cycle
     1.096265502    109,609,027,112      cycles              cgroup2
     1.096265502     11,533,690,148      instructions        cgroup2  #    0.14  insn per cycle
     1.096265502    121,672,937,058      cycles
     1.096265502     19,331,496,727      instructions                 #    0.24  insn per cycle

The system-wide events and the cgroup events are identical. However,
the value of the system-wide events is less than the sum of the cgroup
events: 12,375,398,872 + 109,609,027,112 = 121,984,425,984 cycles
across the two cgroups, versus only 121,672,937,058 cycles system-wide.
That is wrong: the system-wide count should cover at least everything
the cgroup events count.

Both system-wide events and cgroup events are per-cpu, and they share
the same cpuctx groups, cpuctx->flexible_groups/pinned_groups. On a
context switch, the cgroup switch code tries to schedule out and in
all the events in those cpuctx groups. Events belonging to a
non-matching cgroup are filtered out by their event->cgrp. However,
system-wide events, whose event->cgrp is NULL, are unconditionally
switched, and that is what causes the miscounting.
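
As a minimal userspace sketch of that filtering (an illustration only,
not the kernel code: the structures are reduced to the one relevant
field, and the cgroup hierarchy check is simplified to an exact match),
a NULL event->cgrp matches unconditionally:

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  /* Reduced stand-ins for the kernel structures. */
  struct perf_cgroup { int id; };
  struct perf_event { struct perf_cgroup *cgrp; };

  /*
   * Sketch of the cgroup filter: an event without a cgroup attached
   * (event->cgrp == NULL, i.e. a system-wide event) matches every
   * cgroup, so every cgroup switch schedules it out and in as well.
   */
  static bool cgroup_match(const struct perf_event *event,
                           const struct perf_cgroup *running)
  {
          if (!event->cgrp)
                  return true;            /* system-wide: always matches */
          return event->cgrp == running;  /* cgroup event: must match    */
  }

  int main(void)
  {
          struct perf_cgroup cg1 = { 1 }, cg2 = { 2 };
          struct perf_event cgroup_ev = { &cg1 };
          struct perf_event syswide_ev = { NULL };

          /* With cg2 running, the cgroup1 event is filtered out (0) ... */
          printf("cgroup1 event: %d\n", cgroup_match(&cgroup_ev, &cg2));
          /* ... but the system-wide event is switched regardless (1).   */
          printf("system-wide event: %d\n", cgroup_match(&syswide_ev, &cg2));
          return 0;
  }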

Introduce a cgrp_switch flag in struct perf_cpu_context to indicate
that a cgroup context switch is in progress. While the flag is set,
skip system-wide events at each scheduling site so they are not
switched.
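
The guard added at each scheduling site boils down to the predicate
below (a compilable userspace sketch using the same reduced structures
as above; the real hunks are in the diff that follows):

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  struct perf_cgroup { int id; };
  struct perf_event { struct perf_cgroup *cgrp; };
  struct perf_cpu_context { unsigned int cgrp_switch : 1; };

  /*
   * With cpuctx->cgrp_switch set around perf_cgroup_switch(), each
   * sched-out/sched-in path skips system-wide events (event->cgrp ==
   * NULL), so only cgroup events are actually switched.
   */
  static bool skip_during_cgroup_switch(const struct perf_cpu_context *cpuctx,
                                        const struct perf_event *event)
  {
          return cpuctx->cgrp_switch && !event->cgrp;
  }

  int main(void)
  {
          struct perf_cpu_context cpuctx = { .cgrp_switch = 1 };
          struct perf_cgroup cg = { 1 };
          struct perf_event cgroup_ev = { &cg };
          struct perf_event syswide_ev = { NULL };

          printf("skip cgroup event: %d\n",
                 skip_during_cgroup_switch(&cpuctx, &cgroup_ev));  /* 0 */
          printf("skip system-wide event: %d\n",
                 skip_during_cgroup_switch(&cpuctx, &syswide_ev)); /* 1 */
          return 0;
  }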

Fixes: e5d1367f17ba ("perf: Add cgroup support")
Signed-off-by: Kan Liang <kan.liang@...ux.intel.com>
---
 include/linux/perf_event.h |  1 +
 kernel/events/core.c       | 30 ++++++++++++++++++++++++++++--
 2 files changed, 29 insertions(+), 2 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index e47ef76..039e2f2 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -795,6 +795,7 @@ struct perf_cpu_context {
 #ifdef CONFIG_CGROUP_PERF
 	struct perf_cgroup		*cgrp;
 	struct list_head		cgrp_cpuctx_entry;
+	unsigned int			cgrp_switch		:1;
 #endif
 
 	struct list_head		sched_cb_entry;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dc7dead..388dd42 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -809,6 +809,7 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
 
 		perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 		perf_pmu_disable(cpuctx->ctx.pmu);
+		cpuctx->cgrp_switch = true;
 
 		if (mode & PERF_CGROUP_SWOUT) {
 			cpu_ctx_sched_out(cpuctx, EVENT_ALL);
@@ -832,6 +833,7 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
 							     &cpuctx->ctx);
 			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
 		}
+		cpuctx->cgrp_switch = false;
 		perf_pmu_enable(cpuctx->ctx.pmu);
 		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 	}
@@ -2944,13 +2946,25 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 
 	perf_pmu_disable(ctx->pmu);
 	if (is_active & EVENT_PINNED) {
-		list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
+		list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list) {
+#ifdef CONFIG_CGROUP_PERF
+			/* Skip system-wide events during a cgroup switch */
+			if (cpuctx->cgrp_switch && !event->cgrp)
+				continue;
+#endif
 			group_sched_out(event, cpuctx, ctx);
+		}
 	}
 
 	if (is_active & EVENT_FLEXIBLE) {
-		list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
+		list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list) {
+#ifdef CONFIG_CGROUP_PERF
+			/* Skip system-wide events during a cgroup switch */
+			if (cpuctx->cgrp_switch && !event->cgrp)
+				continue;
+#endif
 			group_sched_out(event, cpuctx, ctx);
+		}
 	}
 	perf_pmu_enable(ctx->pmu);
 }
@@ -3280,6 +3294,12 @@ static int pinned_sched_in(struct perf_event *event, void *data)
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
+#ifdef CONFIG_CGROUP_PERF
+	/* Skip system-wide events during a cgroup switch */
+	if (sid->cpuctx->cgrp_switch && !event->cgrp)
+		return 0;
+#endif
+
 	if (!event_filter_match(event))
 		return 0;
 
@@ -3305,6 +3325,12 @@ static int flexible_sched_in(struct perf_event *event, void *data)
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
+#ifdef CONFIG_CGROUP_PERF
+	/* Skip system-wide events during a cgroup switch */
+	if (sid->cpuctx->cgrp_switch && !event->cgrp)
+		return 0;
+#endif
+
 	if (!event_filter_match(event))
 		return 0;
 
-- 
2.7.4
