Message-Id: <1556549045-71814-5-git-send-email-kan.liang@linux.intel.com>
Date:   Mon, 29 Apr 2019 07:44:05 -0700
From:   kan.liang@...ux.intel.com
To:     peterz@...radead.org, tglx@...utronix.de, mingo@...hat.com,
        linux-kernel@...r.kernel.org
Cc:     eranian@...gle.com, tj@...nel.org, ak@...ux.intel.com,
        Kan Liang <kan.liang@...ux.intel.com>
Subject: [PATCH 4/4] perf cgroup: Add fast path for cgroup switch

From: Kan Liang <kan.liang@...ux.intel.com>

The generic visit_groups_merge() is used during a cgroup context
switch to sched in cgroup events. It feeds all events on a given CPU
to pinned/flexible_sched_in() regardless of their cgroup, which causes
high overhead, especially under frequent context switches with several
events and cgroups involved.
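
To see why the generic path is expensive, here is a standalone
userspace sketch of that merge (mock types and names throughout, not
the kernel implementation): it interleaves the cpu == -1 tree and the
per-CPU tree by group_index and hands every event to the callback, so
any cgroup filtering can only happen afterwards, per event.

#include <stdio.h>

struct mock_event {
	unsigned long group_index;	/* global insertion order, the merge key */
	int cgrp_id;			/* 0 means a system-wide (non-cgroup) event */
};

static void visit_groups_merge_mock(struct mock_event *any, int n_any,
				    struct mock_event *percpu, int n_cpu,
				    int (*func)(struct mock_event *))
{
	int i = 0, j = 0;

	/* interleave the two ordered lists; every event gets visited */
	while (i < n_any || j < n_cpu) {
		struct mock_event *evt;

		if (i < n_any && (j >= n_cpu ||
				  any[i].group_index < percpu[j].group_index))
			evt = &any[i++];
		else
			evt = &percpu[j++];
		if (func(evt))
			break;
	}
}

static int print_event(struct mock_event *evt)
{
	printf("visited group_index=%lu cgrp_id=%d\n",
	       evt->group_index, evt->cgrp_id);
	return 0;
}

int main(void)
{
	struct mock_event any[] = { { 1, 0 }, { 4, 2 } };	/* cpu == -1 tree */
	struct mock_event percpu[] = { { 2, 3 }, { 3, 0 } };	/* this CPU's tree */

	visit_groups_merge_mock(any, 2, percpu, 2, print_event);
	return 0;
}

Every event is visited on every cgroup switch, even those belonging to
unrelated cgroups; that per-event cost is what the fast path avoids.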

Add a fast path that feeds only the events of the cgroup being
switched in to pinned/flexible_sched_in().

The fast path does not need event_filter_match() to filter by cgroup
and CPU; pmu_filter_match() alone is enough.

System-wide events no longer need special handling.
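
For illustration, a standalone userspace sketch of the fast path
(again mock types and names, not the kernel code): walk up from the
cgroup being switched in and visit only the events of the nearest
ancestor that actually has events, stopping at the root.

#include <stdio.h>

struct mock_cgroup {
	int id;				/* 1 == root cgroup */
	struct mock_cgroup *parent;
	unsigned long long cgrp_id;	/* nonzero iff this cgroup has events */
};

struct mock_event {
	unsigned long long cgrp_id;
	struct mock_event *next;	/* stand-in for perf_event_groups_next_cgroup() */
};

/* visit only the events of the nearest ancestor that has any events */
static int cgroup_visit_mock(struct mock_cgroup *cgrp,
			     struct mock_event *(*first)(unsigned long long),
			     int (*func)(struct mock_event *))
{
	for (; cgrp; cgrp = cgrp->parent) {
		struct mock_event *evt;

		if (cgrp->id == 1)	/* root cgroup doesn't have events */
			return 0;
		if (!cgrp->cgrp_id)	/* nothing at this level, try the parent */
			continue;
		for (evt = first(cgrp->cgrp_id); evt; evt = evt->next)
			if (func(evt))
				break;
		return 0;		/* done: only one level is visited */
	}
	return 0;
}

static struct mock_event e = { 42, NULL };

static struct mock_event *first_mock(unsigned long long cgrp_id)
{
	return cgrp_id == 42 ? &e : NULL;	/* fake per-cgroup tree lookup */
}

static int sched_in_mock(struct mock_event *evt)
{
	printf("sched in event of cgrp_id=%llu\n", evt->cgrp_id);
	return 0;
}

int main(void)
{
	struct mock_cgroup root  = { 1, NULL,  0 };
	struct mock_cgroup child = { 2, &root, 42 };

	cgroup_visit_mock(&child, first_mock, sched_in_mock);
	return 0;
}

The cost is now proportional to the events of one cgroup rather than
to all events on the CPU, which is what the hunks below implement.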

Signed-off-by: Kan Liang <kan.liang@...ux.intel.com>
---
 kernel/events/core.c | 66 ++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 48 insertions(+), 18 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5ecc048..16bb705 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3310,6 +3310,47 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
 	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
 }
 
+struct sched_in_data {
+	struct perf_event_context *ctx;
+	struct perf_cpu_context *cpuctx;
+	int can_add_hw;
+};
+
+#ifdef CONFIG_CGROUP_PERF
+
+static int cgroup_visit_groups_merge(struct perf_event_groups *groups, int cpu,
+				     int (*func)(struct perf_event *, void *, int (*)(struct perf_event *)),
+				     void *data)
+{
+	struct sched_in_data *sid = data;
+	struct cgroup_subsys_state *css;
+	struct perf_cgroup *cgrp;
+	struct perf_event *evt;
+	u64 cgrp_id;
+
+	for (css = &sid->cpuctx->cgrp->css; css; css = css->parent) {
+		/* root cgroup doesn't have events */
+		if (css->id == 1)
+			return 0;
+
+		cgrp = container_of(css, struct perf_cgroup, css);
+		cgrp_id = *this_cpu_ptr(cgrp->cgrp_id);
+		/* Only visit groups when the cgroup has events */
+		if (cgrp_id) {
+			evt = perf_event_groups_first_cgroup(groups, cpu, cgrp_id);
+			while (evt) {
+				if (func(evt, (void *)sid, pmu_filter_match))
+					break;
+				evt = perf_event_groups_next_cgroup(evt);
+			}
+			return 0;
+		}
+	}
+
+	return 0;
+}
+#endif
+
 static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
 			      int (*func)(struct perf_event *, void *, int (*)(struct perf_event *)),
 			      void *data)
@@ -3317,6 +3358,13 @@ static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
 	struct perf_event **evt, *evt1, *evt2;
 	int ret;
 
+#ifdef CONFIG_CGROUP_PERF
+	struct sched_in_data *sid = data;
+
+	/* fast path for cgroup switch */
+	if (sid->cpuctx->cgrp_switch)
+		return cgroup_visit_groups_merge(groups, cpu, func, data);
+#endif
 	evt1 = perf_event_groups_first(groups, -1);
 	evt2 = perf_event_groups_first(groups, cpu);
 
@@ -3342,12 +3390,6 @@ static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
 	return 0;
 }
 
-struct sched_in_data {
-	struct perf_event_context *ctx;
-	struct perf_cpu_context *cpuctx;
-	int can_add_hw;
-};
-
 static int pinned_sched_in(struct perf_event *event, void *data,
 			   int (*filter_match)(struct perf_event *))
 {
@@ -3356,12 +3398,6 @@ static int pinned_sched_in(struct perf_event *event, void *data,
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
-#ifdef CONFIG_CGROUP_PERF
-	/* Don't sched system-wide event when cgroup context switch */
-	if (sid->cpuctx->cgrp_switch && !event->cgrp)
-		return 0;
-#endif
-
 	if (!filter_match(event))
 		return 0;
 
@@ -3388,12 +3424,6 @@ static int flexible_sched_in(struct perf_event *event, void *data,
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
-#ifdef CONFIG_CGROUP_PERF
-	/* Don't sched system-wide event when cgroup context switch */
-	if (sid->cpuctx->cgrp_switch && !event->cgrp)
-		return 0;
-#endif
-
 	if (!filter_match(event))
 		return 0;
 
-- 
2.7.4
