lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20240412210756.309828-12-weilin.wang@intel.com>
Date: Fri, 12 Apr 2024 14:07:51 -0700
From: weilin.wang@...el.com
To: weilin.wang@...el.com,
	Ian Rogers <irogers@...gle.com>,
	Kan Liang <kan.liang@...ux.intel.com>,
	Namhyung Kim <namhyung@...nel.org>,
	Arnaldo Carvalho de Melo <acme@...nel.org>,
	Peter Zijlstra <peterz@...radead.org>,
	Ingo Molnar <mingo@...hat.com>,
	Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
	Jiri Olsa <jolsa@...nel.org>,
	Adrian Hunter <adrian.hunter@...el.com>
Cc: linux-perf-users@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	Perry Taylor <perry.taylor@...el.com>,
	Samantha Alt <samantha.alt@...el.com>,
	Caleb Biggers <caleb.biggers@...el.com>
Subject: [RFC PATCH v5 11/16] perf stat: Add partial support on MSR in hardware-grouping

From: Weilin Wang <weilin.wang@...el.com>

Add MSR usage into consideration when grouping. Each group can only
include one event that requires one specific MSR. Currently, we only
support events that require one MSR. For some OCR events that have
multiple MSRs in their MSRIndex field, this commit will treat them as
one "large MSR". We're planning to improve this part in the future.

Signed-off-by: Weilin Wang <weilin.wang@...el.com>
---
 tools/perf/pmu-events/jevents.py   |  4 +++-
 tools/perf/pmu-events/pmu-events.h |  6 ++++++
 tools/perf/util/metricgroup.c      | 27 ++++++++++++++++++++++-----
 3 files changed, 31 insertions(+), 6 deletions(-)

diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py
index 7cfd86d77fea..66531c2df224 100755
--- a/tools/perf/pmu-events/jevents.py
+++ b/tools/perf/pmu-events/jevents.py
@@ -54,7 +54,9 @@ _json_event_attributes = [
     # Short things in alphabetical order.
     'compat', 'deprecated', 'perpkg', 'unit',
     # Longer things (the last won't be iterated over during decompress).
-    'long_desc'
+    'long_desc',
+    # MSRIndex required by the event. NULL if no MSR is required.
+    'msr'
 ]
 
 # Attributes that are in pmu_unit_layout.
diff --git a/tools/perf/pmu-events/pmu-events.h b/tools/perf/pmu-events/pmu-events.h
index 5b42a18693cf..76ec2b431dce 100644
--- a/tools/perf/pmu-events/pmu-events.h
+++ b/tools/perf/pmu-events/pmu-events.h
@@ -54,6 +54,12 @@ struct pmu_event {
 	const char *unit;
 	bool perpkg;
 	bool deprecated;
+	/*
+	 * MSR is another resource that restricts grouping. Currently, we
+	 * support only MSRIndex 0x3F6 and 0x3F7.  TODO: add support for all the
+	 * MSRs related to event grouping.
+	 */
+	const char *msr;
 };
 
 struct pmu_metric {
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index b9e46dff1e17..9548654c9f6d 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -193,6 +193,7 @@ struct metricgroup__event_info {
 	 * during the event grouping.
 	 */
 	bool free_counter;
+	const char *msr;
 	/** The counters the event allowed to be collected on. */
 	DECLARE_BITMAP(counters, NR_COUNTERS);
 };
@@ -240,6 +241,7 @@ struct metricgroup__group {
 	DECLARE_BITMAP(fixed_counters, NR_COUNTERS);
 	/** Head to the list of event names in this group*/
 	struct list_head event_head;
+	const char *msr;
 };
 
 struct metricgroup__group_events {
@@ -1747,6 +1749,7 @@ static void metricgroup__free_pmu_info(struct list_head *pmu_info_list)
 static struct metricgroup__event_info *event_info__new(const char *name,
 						      const char *pmu_name,
 						      const char *counter,
+						      const char *msr,
 						      bool free_counter)
 {
 	int ret = 0;
@@ -1764,6 +1767,11 @@ static struct metricgroup__event_info *event_info__new(const char *name,
 	e->pmu_name = strdup(pmu_name);
 	if (!e->pmu_name || !e->name)
 		return NULL;
+	if (msr) {
+		e->msr = strdup(msr);
+		if (!e->msr)
+			return NULL;
+	}
 	e->free_counter = free_counter;
 	if (free_counter) {
 		ret = set_counter_bitmap(0, e->counters);
@@ -1801,7 +1809,8 @@ static int metricgroup__add_metric_event_callback(const struct pmu_event *pe,
 	if (!strcasecmp(pe->name, d->event_name)) {
 		if (!pe->counters_list)
 			return -EINVAL;
-		event = event_info__new(d->event_id, pe->pmu, pe->counters_list, /*free_counter=*/false);
+		event = event_info__new(d->event_id, pe->pmu, pe->counters_list,
+					pe->msr, /*free_counter=*/false);
 		if (!event)
 			return -ENOMEM;
 		list_add(&event->nd, d->list);
@@ -1927,7 +1936,11 @@ static int find_and_set_counters(struct metricgroup__event_info *e,
 {
 	int ret;
 	unsigned long find_bit = 0;
 
+	if (e->msr != NULL && current_group->msr != NULL && !strcmp(e->msr, current_group->msr)) {
+		pr_debug("current group uses the required MSR %s already\n", e->msr);
+		return -ENOSPC;
+	}
 	if (e->free_counter)
 		return 0;
 	if (e->fixed_counter) {
@@ -1964,11 +1975,17 @@ static int _insert_event(struct metricgroup__event_info *e,
 		list_add(&event->nd, &group->event_head);
 	else
 		list_add_tail(&event->nd, &group->event_head);
+	if (e->msr != NULL) {
+		group->msr = strdup(e->msr);
+		pr_debug("Add event %s to group, uses MSR %s\n", e->name, e->msr);
+		if (!group->msr)
+			return -ENOMEM;
+	}
 	return 0;
 }
 
 /**
- * Insert the new_group node at the end of the group list.
+ * Initialize the new group and insert it to the end of the group list.
  */
 static int insert_new_group(struct list_head *head,
 			   struct metricgroup__group *new_group,
@@ -2185,7 +2202,7 @@ static int create_grouping(struct list_head *pmu_info_list,
 		bitmap_scnprintf(e->counters, NR_COUNTERS, bit_buf, NR_COUNTERS);
 		pr_debug("Event name %s, [pmu]=%s, [counters]=%s\n", e->name,
 			e->pmu_name, bit_buf);
-		ret = assign_event_grouping(e, pmu_info_list, &groups);
+		ret = assign_event_grouping(e, pmu_info_list, grouping);
 		if (ret)
 			goto out;
 	}
@@ -2231,7 +2248,7 @@ static int hw_aware_build_grouping(struct expr_parse_ctx *ctx __maybe_unused,
 		if (is_special_event(id)) {
 			struct metricgroup__event_info *event;
 
-			event = event_info__new(id, "default_core", "0",
+			event = event_info__new(id, "default_core", "0", /*msr=*/NULL,
 						/*free_counter=*/true);
 			if (!event)
 				goto err_out;
-- 
2.42.0


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ