lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Sat, 29 Oct 2016 17:38:19 -0700
From:   David Carrillo-Cisneros <davidcc@...gle.com>
To:     linux-kernel@...r.kernel.org
Cc:     "x86@...nel.org" <x86@...nel.org>, Ingo Molnar <mingo@...hat.com>,
        Thomas Gleixner <tglx@...utronix.de>,
        Andi Kleen <ak@...ux.intel.com>,
        Kan Liang <kan.liang@...el.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Vegard Nossum <vegard.nossum@...il.com>,
        Marcelo Tosatti <mtosatti@...hat.com>,
        Nilay Vaish <nilayvaish@...il.com>,
        Borislav Petkov <bp@...e.de>,
        Vikas Shivappa <vikas.shivappa@...ux.intel.com>,
        Ravi V Shankar <ravi.v.shankar@...el.com>,
        Fenghua Yu <fenghua.yu@...el.com>,
        Paul Turner <pjt@...gle.com>,
        Stephane Eranian <eranian@...gle.com>,
        David Carrillo-Cisneros <davidcc@...gle.com>
Subject: [PATCH v3 22/46] perf/x86/intel/cmt: sync cgroups and intel_cmt device start/stop

Start/stop monitoring for cgroups when the intel_cmt device is
started/stopped.

Signed-off-by: David Carrillo-Cisneros <davidcc@...gle.com>
---
 arch/x86/events/intel/cmt.c | 99 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 99 insertions(+)

diff --git a/arch/x86/events/intel/cmt.c b/arch/x86/events/intel/cmt.c
index f7da8cf..5c64d94 100644
--- a/arch/x86/events/intel/cmt.c
+++ b/arch/x86/events/intel/cmt.c
@@ -766,6 +766,11 @@ static struct perf_cgroup *perf_cgroup_from_css(struct cgroup_subsys_state *css)
 	return container_of(css, struct perf_cgroup, css);
 }
 
+static struct monr *monr_from_css(struct cgroup_subsys_state *css)
+{
+	return ((struct monr *)perf_cgroup_from_css(css)->arch_info);
+}
+
 /**
  * perf_cgroup_mon_started() - Tell if cgroup is monitored by its own monr.
  *
@@ -1445,6 +1450,45 @@ static struct pmu intel_cmt_pmu = {
 	.read		     = intel_cmt_event_read,
 };
 
+#ifdef CONFIG_CGROUP_PERF
+static int __css_go_online(struct cgroup_subsys_state *css)
+{
+	struct perf_cgroup *lma, *cgrp = perf_cgroup_from_css(css);
+	int err;
+
+	if (!css->parent) {
+		err = __css_start_monitoring(css);
+		if (err)
+			return err;
+		return monr_apply_uflags(monr_from_css(css), NULL);
+	}
+	lma = perf_cgroup_find_lma(cgrp);
+	perf_cgroup_set_monr(cgrp, monr_from_perf_cgroup(lma));
+
+	return 0;
+}
+
+static void __css_go_offline(struct cgroup_subsys_state *css)
+{
+	struct monr *monr;
+	struct perf_cgroup *cgrp = perf_cgroup_from_css(css);
+
+	monr = monr_from_perf_cgroup(cgrp);
+	if (!perf_cgroup_mon_started(cgrp)) {
+		perf_cgroup_set_monr(cgrp, NULL);
+		return;
+	}
+
+	monr_apply_uflags(monr, pkg_uflags_zeroes);
+	/*
+	 * Terminate the monr even if there are other users (events); the
+	 * monr will stay a zombie until those events are terminated.
+	 */
+	monr_destroy(monr);
+}
+
+#endif
+
 static void free_pkg_data(struct pkg_data *pkg_data)
 {
 	kfree(pkg_data);
@@ -1666,6 +1710,42 @@ static const struct x86_cpu_id intel_cmt_match[] = {
 	{}
 };
 
+#ifdef CONFIG_CGROUP_PERF
+/* Start or stop monitoring for all cgroups in the cgroup hierarchy. */
+static int __switch_monitoring_all_cgroups(bool online)
+{
+	int err = 0;
+	struct cgroup_subsys_state *css_root, *css = NULL;
+
+	lockdep_assert_held(&cmt_mutex);
+	monr_hrchy_assert_held_mutexes();
+
+	rcu_read_lock();
+	/* Get css for root cgroup */
+	css_root =  get_root_perf_css();
+
+	css_for_each_descendant_pre(css, css_root) {
+		if (!css_tryget_online(css))
+			continue;
+
+		rcu_read_unlock();
+
+		if (online)
+			err = __css_go_online(css);
+		else
+			__css_go_offline(css);
+		css_put(css);
+		if (err)
+			return err;
+
+		rcu_read_lock();
+	}
+	rcu_read_unlock();
+	return 0;
+}
+
+#endif
+
 static void cmt_dealloc(void)
 {
 	kfree(monr_hrchy_root);
@@ -1682,6 +1762,13 @@ static void cmt_stop(void)
 {
 	cpuhp_remove_state(CPUHP_AP_PERF_X86_CMT_ONLINE);
 	cpuhp_remove_state(CPUHP_PERF_X86_CMT_PREP);
+
+	mutex_lock(&cmt_mutex);
+	monr_hrchy_acquire_mutexes();
+	__switch_monitoring_all_cgroups(false);
+	monr_hrchy_release_mutexes();
+
+	mutex_unlock(&cmt_mutex);
 }
 
 static int __init cmt_alloc(void)
@@ -1743,6 +1830,18 @@ static int __init cmt_start(void)
 	}
 	event_attr_intel_cmt_llc_scale.event_str = str;
 
+#ifdef CONFIG_CGROUP_PERF
+	mutex_lock(&cmt_mutex);
+	monr_hrchy_acquire_mutexes();
+	err = __switch_monitoring_all_cgroups(true);
+	if (err)
+		__switch_monitoring_all_cgroups(false);
+	monr_hrchy_release_mutexes();
+	mutex_unlock(&cmt_mutex);
+	if (err)
+		goto rm_online;
+#endif
+
 	return 0;
 
 rm_online:
-- 
2.8.0.rc3.226.g39d4020

Powered by blists - more mailing lists