Date:   Mon, 14 Mar 2022 01:21:43 +0800
From:   Wen Yang <simon.wy@...baba-inc.com>
To:     Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>,
        Arnaldo Carvalho de Melo <acme@...nel.org>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Thomas Gleixner <tglx@...utronix.de>
Cc:     Wen Yang <simon.wy@...baba-inc.com>,
        Stephane Eranian <eranian@...gle.com>,
        Mark Rutland <mark.rutland@....com>,
        Jiri Olsa <jolsa@...hat.com>,
        Namhyung Kim <namhyung@...nel.org>,
        Borislav Petkov <bp@...en8.de>, x86@...nel.org,
        Wen Yang <wenyang@...ux.alibaba.com>,
        "H. Peter Anvin" <hpa@...or.com>, linux-perf-users@...r.kernel.org,
        linux-kernel@...r.kernel.org
Subject: [RESEND PATCH v2 2/3] perf/x86: improve the event scheduling to avoid unnecessary x86_pmu_{stop|start}

During long-term monitoring of CPI data on cloud servers in some of our
clusters, we occasionally observe data anomalies.

When perf events are frequently created and deleted, events may be moved
between PMU counters, and x86_pmu_{stop|start} is called frequently. On
stop we save the current counter value, then reprogram the event on
another counter and continue. This is generally fine, but when the NMI
watchdog occupies the fixed *cycles* counter, the monitoring program may
be left with only a single general-purpose counter, and in that case the
frequent reprogramming may not be effective.
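
For reference, here is a minimal user-space model of that
stop/reprogram/start round trip. It is an illustrative sketch only, not
kernel code: the struct, helper names and simulated counter values are
stand-ins, loosely modelled on what x86_perf_event_update() and
x86_perf_event_set_period() do, and no real hardware counter is read.

/* Illustrative user-space model only (not kernel code). */
#include <stdio.h>
#include <stdint.h>

struct sim_event {
	uint64_t count;       /* accumulated value, like event->count      */
	uint64_t prev_raw;    /* last raw snapshot of the hardware counter */
	int      counter_idx; /* hardware counter currently assigned       */
};

/* stand-in for reading a hardware counter; each read advances by 500 */
static uint64_t read_hw_counter(int idx)
{
	static uint64_t fake[4] = { 1000, 2000, 3000, 4000 };
	return fake[idx] += 500;
}

/* model of the "stop" side: fold the delta since the last snapshot */
static void sim_stop(struct sim_event *e)
{
	uint64_t raw = read_hw_counter(e->counter_idx);
	e->count += raw - e->prev_raw;
}

/* model of the "start" side: reprogram on a (possibly different) counter */
static void sim_start(struct sim_event *e, int new_idx)
{
	e->counter_idx = new_idx;
	e->prev_raw = read_hw_counter(e->counter_idx);
}

int main(void)
{
	struct sim_event ev = { 0 };

	sim_start(&ev, 0);
	/* every reschedule costs one stop/start pair plus a counter switch */
	for (int i = 1; i <= 3; i++) {
		sim_stop(&ev);
		sim_start(&ev, i % 2);
	}
	sim_stop(&ev);
	printf("accumulated count: %llu\n", (unsigned long long)ev.count);
	return 0;
}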

Add the msk_counters and msk_events fields to record the counters and
events that are already in use, so that both __perf_sched_find_counter()
and perf_sched_next_event() can skip them and avoid unnecessary
x86_pmu_{stop|start} calls.
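
For illustration, a stand-alone sketch of the masking idea follows. It is
simplified and not the kernel implementation: the per-event idxmsk array
is a stand-in for struct event_constraint, the weight ordering of the
real scheduler is omitted, and obtain_used_registers() only mirrors the
intent of perf_sched_obtain_used_registers() in the patch.

/* Illustrative stand-alone sketch only (not kernel code). */
#include <stdio.h>
#include <stdint.h>

#define NUM_COUNTERS 4

/* derive the "already used" event/counter bitmasks from a prior assignment */
static void obtain_used_registers(const int *assign, int n,
				  uint64_t *events, uint64_t *counters)
{
	*events = 0;
	*counters = 0;
	for (int idx = 0; idx < n; idx++) {
		if (assign[idx] != -1) {
			*events   |= 1ULL << idx;
			*counters |= 1ULL << assign[idx];
		}
	}
}

/*
 * Greedy placement: idxmsk[e] is the set of counters event e may use.
 * Events already placed in assign[] keep their counters and are skipped.
 */
static int assign_events(const uint64_t *idxmsk, int n, int *assign)
{
	uint64_t msk_events, msk_counters;

	obtain_used_registers(assign, n, &msk_events, &msk_counters);

	for (int e = 0; e < n; e++) {
		if (msk_events & (1ULL << e))
			continue;               /* already placed, leave it */

		for (int c = 0; c < NUM_COUNTERS; c++) {
			uint64_t bit = 1ULL << c;

			if (msk_counters & bit)
				continue;       /* counter already in use */
			if (!(idxmsk[e] & bit))
				continue;       /* not allowed by constraint */

			assign[e] = c;
			msk_counters |= bit;
			break;
		}
		if (assign[e] == -1)
			return -1;              /* could not place event e */
	}
	return 0;
}

int main(void)
{
	/* event 0 may only use counter 0; events 1 and 2 may use any */
	uint64_t idxmsk[3] = { 0x1, 0xf, 0xf };
	/* event 1 already sits on counter 2 from a previous round */
	int assign[3] = { -1, 2, -1 };

	if (!assign_events(idxmsk, 3, assign))
		for (int i = 0; i < 3; i++)
			printf("event %d -> counter %d\n", i, assign[i]);
	return 0;
}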

Signed-off-by: Wen Yang <simon.wy@...baba-inc.com>
Cc: Peter Zijlstra (Intel) <peterz@...radead.org>
Cc: Stephane Eranian <eranian@...gle.com>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Arnaldo Carvalho de Melo <acme@...nel.org>
Cc: Mark Rutland <mark.rutland@....com>
Cc: Alexander Shishkin <alexander.shishkin@...ux.intel.com>
Cc: Jiri Olsa <jolsa@...hat.com>
Cc: Namhyung Kim <namhyung@...nel.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Borislav Petkov <bp@...en8.de>
Cc: x86@...nel.org
Cc: Wen Yang <wenyang@...ux.alibaba.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: linux-perf-users@...r.kernel.org
Cc: linux-kernel@...r.kernel.org
---
 arch/x86/events/core.c | 103 ++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 90 insertions(+), 13 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index b14fb1b..b6ea220 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -796,33 +796,70 @@ struct perf_sched {
 	int			max_events;
 	int			max_gp;
 	int			saved_states;
+	u64			msk_counters;
+	u64			msk_events;
 	struct event_constraint	**constraints;
 	struct sched_state	state;
 	struct sched_state	saved[SCHED_STATES_MAX];
 };
 
+static int perf_sched_calc_weight(struct event_constraint **constraints,
+		int num, int wmin, int wmax, u64 msk_events)
+{
+	int min_weight;
+	int idx;
+
+	if (!msk_events) {
+		min_weight = wmin;
+		goto out;
+	}
+
+	min_weight = wmax;
+	for (idx = 0; idx < num; idx++) {
+		if (msk_events & BIT_ULL(idx))
+			continue;
+
+		min_weight = min(min_weight, constraints[idx]->weight);
+	}
+
+out:
+	return min_weight;
+}
+
+static int perf_sched_calc_event(struct event_constraint **constraints,
+		int num, int weight, u64 msk_events)
+{
+	int idx;
+
+	for (idx = 0; idx < num; idx++) {
+		if (msk_events & BIT_ULL(idx))
+			continue;
+
+		if (constraints[idx]->weight == weight)
+			break;
+	}
+
+	/* start with min weight */
+	return idx;
+}
+
 /*
  * Initialize iterator that runs through all events and counters.
  */
 static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
-			    int num, int wmin, int wmax, int gpmax)
+			    int num, int wmin, int wmax, int gpmax, u64 mevt, u64 mcnt)
 {
-	int idx;
-
 	memset(sched, 0, sizeof(*sched));
 	sched->max_events	= num;
 	sched->max_weight	= wmax;
 	sched->max_gp		= gpmax;
 	sched->constraints	= constraints;
+	sched->msk_events	= mevt;
+	sched->msk_counters	= mcnt;
 
-	for (idx = 0; idx < num; idx++) {
-		if (constraints[idx]->weight == wmin)
-			break;
-	}
-
-	sched->state.event	= idx;		/* start with min weight */
-	sched->state.weight	= wmin;
-	sched->state.unassigned	= num;
+	sched->state.weight = perf_sched_calc_weight(constraints, num, wmin, wmax, mcnt);
+	sched->state.event = perf_sched_calc_event(constraints, num, sched->state.weight, mevt);
+	sched->state.unassigned = num - hweight_long(mevt);
 }
 
 static void perf_sched_save_state(struct perf_sched *sched)
@@ -874,6 +911,9 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
 		for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
 			u64 mask = BIT_ULL(idx);
 
+			if (sched->msk_counters & mask)
+				continue;
+
 			if (sched->state.used & mask)
 				continue;
 
@@ -890,6 +930,9 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
 		if (c->flags & PERF_X86_EVENT_PAIR)
 			mask |= mask << 1;
 
+		if (sched->msk_counters & mask)
+			continue;
+
 		if (sched->state.used & mask)
 			continue;
 
@@ -921,6 +964,12 @@ static bool perf_sched_find_counter(struct perf_sched *sched)
 	return true;
 }
 
+static void ignore_used_index(u64 mask, int *index)
+{
+	while (mask & BIT_ULL(*index))
+		++*index;
+}
+
 /*
  * Go through all unassigned events and find the next one to schedule.
  * Take events with the least weight first. Return true on success.
@@ -935,9 +984,12 @@ static bool perf_sched_next_event(struct perf_sched *sched)
 	do {
 		/* next event */
 		sched->state.event++;
+		ignore_used_index(sched->msk_events, &sched->state.event);
 		if (sched->state.event >= sched->max_events) {
 			/* next weight */
 			sched->state.event = 0;
+			ignore_used_index(sched->msk_events, &sched->state.event);
+
 			sched->state.weight++;
 			if (sched->state.weight > sched->max_weight)
 				return false;
@@ -950,12 +1002,28 @@ static bool perf_sched_next_event(struct perf_sched *sched)
 	return true;
 }
 
+static void perf_sched_obtain_used_registers(int *assign, int n, u64 *events, u64 *counters)
+{
+	int idx;
+
+	*events = 0;
+	*counters = 0;
+	for (idx = 0; idx < n; idx++) {
+		if (assign[idx] != -1) {
+			*events |= BIT_ULL(idx);
+			*counters |= BIT_ULL(assign[idx]);
+		}
+	}
+}
+
 static int __perf_assign_events(struct event_constraint **constraints, int n,
 			int wmin, int wmax, int gpmax, int *assign)
 {
+	u64 mevt, mcnt;
 	struct perf_sched sched;
 
-	perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax);
+	perf_sched_obtain_used_registers(assign, n, &mevt, &mcnt);
+	perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax, mevt, mcnt);
 
 	do {
 		if (!perf_sched_find_counter(&sched))
@@ -980,6 +1048,8 @@ int perf_assign_events(struct perf_event **event_list,
 	int unsched = 0;
 	int i;
 
+	memset(assign, -1, n * sizeof(int));
+
+
 	/*
 	 * fastpath, try to reuse previous register
 	 */
@@ -1012,10 +1082,17 @@ int perf_assign_events(struct perf_event **event_list,
 	}
 
 	/* slow path */
-	if (i != n)
+	if (i != n) {
 		unsched = __perf_assign_events(constraints, n,
 				wmin, wmax, gpmax, assign);
 
+		if (unsched) {
+			memset(assign, -1, n * sizeof(int));
+			unsched = __perf_assign_events(constraints, n,
+					wmin, wmax, gpmax, assign);
+		}
+	}
+
 	return unsched;
 }
 EXPORT_SYMBOL_GPL(perf_assign_events);
-- 
1.8.3.1
