lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Mon, 21 Mar 2016 02:53:04 -0700
From:	tip-bot for Vikas Shivappa <tipbot@...or.com>
To:	linux-tip-commits@...r.kernel.org
Cc:	torvalds@...ux-foundation.org, vincent.weaver@...ne.edu,
	eranian@...gle.com, tony.luck@...el.com,
	vikas.shivappa@...ux.intel.com, dsahern@...il.com,
	namhyung@...nel.org, mingo@...nel.org, dvlasenk@...hat.com,
	bp@...en8.de, brgerst@...il.com, peterz@...radead.org,
	luto@...capital.net, hpa@...or.com, jolsa@...hat.com,
	matt@...eblueprint.co.uk, acme@...hat.com, tglx@...utronix.de,
	alexander.shishkin@...ux.intel.com, linux-kernel@...r.kernel.org
Subject: [tip:perf/urgent] perf/x86/mbm: Implement RMID recycling

Commit-ID:  2d4de8376ff1d94a5070cfa9092c59bfdc4e693e
Gitweb:     http://git.kernel.org/tip/2d4de8376ff1d94a5070cfa9092c59bfdc4e693e
Author:     Vikas Shivappa <vikas.shivappa@...ux.intel.com>
AuthorDate: Thu, 10 Mar 2016 15:32:11 -0800
Committer:  Ingo Molnar <mingo@...nel.org>
CommitDate: Mon, 21 Mar 2016 09:08:20 +0100

perf/x86/mbm: Implement RMID recycling

An RMID could be allocated or deallocated as part of RMID recycling.

When an RMID is allocated for an MBM event, the MBM counter needs to be
initialized because the next time we read the counter we need the previous
value to account for the total bytes that went to the memory controller.

Similarly, when an RMID is deallocated we need to update the ->count
variable.

Signed-off-by: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Reviewed-by: Tony Luck <tony.luck@...el.com>
Acked-by: Thomas Gleixner <tglx@...utronix.de>
Cc: Alexander Shishkin <alexander.shishkin@...ux.intel.com>
Cc: Andy Lutomirski <luto@...capital.net>
Cc: Arnaldo Carvalho de Melo <acme@...hat.com>
Cc: Borislav Petkov <bp@...en8.de>
Cc: Brian Gerst <brgerst@...il.com>
Cc: David Ahern <dsahern@...il.com>
Cc: Denys Vlasenko <dvlasenk@...hat.com>
Cc: H. Peter Anvin <hpa@...or.com>
Cc: Jiri Olsa <jolsa@...hat.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Matt Fleming <matt@...eblueprint.co.uk>
Cc: Namhyung Kim <namhyung@...nel.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Stephane Eranian <eranian@...gle.com>
Cc: Vince Weaver <vincent.weaver@...ne.edu>
Cc: fenghua.yu@...el.com
Cc: h.peter.anvin@...el.com
Cc: ravi.v.shankar@...el.com
Cc: vikas.shivappa@...el.com
Link: http://lkml.kernel.org/r/1457652732-4499-6-git-send-email-vikas.shivappa@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 arch/x86/events/intel/cqm.c | 31 +++++++++++++++++++++++++++----
 1 file changed, 27 insertions(+), 4 deletions(-)

diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 610bd8a..a98f472 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -450,6 +450,7 @@ struct rmid_read {
 
 static void __intel_cqm_event_count(void *info);
 static void init_mbm_sample(u32 rmid, u32 evt_type);
+static void __intel_mbm_event_count(void *info);
 
 static bool is_mbm_event(int e)
 {
@@ -476,8 +477,14 @@ static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
 			.rmid = old_rmid,
 		};
 
-		on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count,
-				 &rr, 1);
+		if (is_mbm_event(group->attr.config)) {
+			rr.evt_type = group->attr.config;
+			on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count,
+					 &rr, 1);
+		} else {
+			on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count,
+					 &rr, 1);
+		}
 		local64_set(&group->count, atomic64_read(&rr.value));
 	}
 
@@ -489,6 +496,22 @@ static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
 
 	raw_spin_unlock_irq(&cache_lock);
 
+	/*
+	 * If the allocation is for mbm, init the mbm stats.
+	 * Need to check if each event in the group is mbm event
+	 * because there could be multiple type of events in the same group.
+	 */
+	if (__rmid_valid(rmid)) {
+		event = group;
+		if (is_mbm_event(event->attr.config))
+			init_mbm_sample(rmid, event->attr.config);
+
+		list_for_each_entry(event, head, hw.cqm_group_entry) {
+			if (is_mbm_event(event->attr.config))
+				init_mbm_sample(rmid, event->attr.config);
+		}
+	}
+
 	return old_rmid;
 }
 
@@ -978,7 +1001,7 @@ static void intel_cqm_setup_event(struct perf_event *event,
 			/* All tasks in a group share an RMID */
 			event->hw.cqm_rmid = rmid;
 			*group = iter;
-			if (is_mbm_event(event->attr.config))
+			if (is_mbm_event(event->attr.config) && __rmid_valid(rmid))
 				init_mbm_sample(rmid, event->attr.config);
 			return;
 		}
@@ -996,7 +1019,7 @@ static void intel_cqm_setup_event(struct perf_event *event,
 	else
 		rmid = __get_rmid();
 
-	if (is_mbm_event(event->attr.config))
+	if (is_mbm_event(event->attr.config) && __rmid_valid(rmid))
 		init_mbm_sample(rmid, event->attr.config);
 
 	event->hw.cqm_rmid = rmid;

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ