Message-ID: <20240612133911.3447625-2-ben.gainey@arm.com>
Date: Wed, 12 Jun 2024 14:39:08 +0100
From: Ben Gainey <ben.gainey@....com>
To: peterz@...radead.org,
	mingo@...hat.com,
	acme@...nel.org,
	namhyung@...nel.org
Cc: james.clark@....com,
	mark.rutland@....com,
	alexander.shishkin@...ux.intel.com,
	jolsa@...nel.org,
	irogers@...gle.com,
	adrian.hunter@...el.com,
	linux-perf-users@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	Ben Gainey <ben.gainey@....com>
Subject: [PATCH v8 1/4] perf: Rename perf_event_context.nr_pending to nr_no_switch_fast.

nr_pending counts the number of events in the context that have
either pending_sigtrap or pending_work set, but it is used
to prevent taking the fast path in perf_event_context_sched_out.

Rename it to reflect what it is used for rather than what it
counts. This allows the field to track other event properties
that also require skipping the fast path, without possible
confusion over the name.

Signed-off-by: Ben Gainey <ben.gainey@....com>
---
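Note (illustration only, not part of the patch): a minimal, self-contained
C sketch of how this counter gates the switch-out fast path, mirroring the
perf_event_context_sched_out() hunk below. The type and helper names here
are simplified stand-ins, not the kernel's.

	#include <stdbool.h>

	struct perf_ctx_sketch {
		long nr_no_switch_fast;	/* stand-in for the kernel's local_t */
	};

	/* An event with a pending SIGTRAP or pending task work bumps the count. */
	static void mark_no_switch_fast(struct perf_ctx_sketch *ctx)
	{
		ctx->nr_no_switch_fast++;
	}

	/* Dropped again once the pending work or signal has been delivered. */
	static void clear_no_switch_fast(struct perf_ctx_sketch *ctx)
	{
		ctx->nr_no_switch_fast--;
	}

	/*
	 * The fast path (swapping the prev/next context pointers instead of a
	 * full sched-out/sched-in) is only safe when neither context has events
	 * that rely on ctx->task staying put.
	 */
	static bool can_switch_fast(const struct perf_ctx_sketch *prev,
				    const struct perf_ctx_sketch *next)
	{
		return prev->nr_no_switch_fast == 0 && next->nr_no_switch_fast == 0;
	}
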
 include/linux/perf_event.h |  5 ++++-
 kernel/events/core.c       | 14 +++++++-------
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a5304ae8c654..c0c6c70bb9f1 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -962,12 +962,15 @@ struct perf_event_context {
 	struct rcu_head			rcu_head;
 
 	/*
+	 * The count of events for which using the switch-out fast path
+	 * should be avoided.
+	 *
 	 * Sum (event->pending_sigtrap + event->pending_work)
 	 *
 	 * The SIGTRAP is targeted at ctx->task, as such it won't do changing
 	 * that until the signal is delivered.
 	 */
-	local_t				nr_pending;
+	local_t				nr_no_switch_fast;
 };
 
 /*
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8f908f077935..c9cb22304d11 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2295,7 +2295,7 @@ event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
 			task_work_add(current, &event->pending_task, TWA_RESUME);
 		}
 		if (dec)
-			local_dec(&event->ctx->nr_pending);
+			local_dec(&event->ctx->nr_no_switch_fast);
 	}
 
 	perf_event_set_state(event, state);
@@ -3531,9 +3531,9 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
 
 			perf_ctx_disable(ctx, false);
 
-			/* PMIs are disabled; ctx->nr_pending is stable. */
-			if (local_read(&ctx->nr_pending) ||
-			    local_read(&next_ctx->nr_pending)) {
+			/* PMIs are disabled; ctx->nr_no_switch_fast is stable. */
+			if (local_read(&ctx->nr_no_switch_fast) ||
+			    local_read(&next_ctx->nr_no_switch_fast)) {
 				/*
 				 * Must not swap out ctx when there's pending
 				 * events that rely on the ctx->task relation.
@@ -6768,7 +6768,7 @@ static void __perf_pending_irq(struct perf_event *event)
 		if (event->pending_sigtrap) {
 			event->pending_sigtrap = 0;
 			perf_sigtrap(event);
-			local_dec(&event->ctx->nr_pending);
+			local_dec(&event->ctx->nr_no_switch_fast);
 		}
 		if (event->pending_disable) {
 			event->pending_disable = 0;
@@ -6841,7 +6841,7 @@ static void perf_pending_task(struct callback_head *head)
 	if (event->pending_work) {
 		event->pending_work = 0;
 		perf_sigtrap(event);
-		local_dec(&event->ctx->nr_pending);
+		local_dec(&event->ctx->nr_no_switch_fast);
 	}
 
 	if (rctx >= 0)
@@ -9711,7 +9711,7 @@ static int __perf_event_overflow(struct perf_event *event,
 			pending_id = hash32_ptr((void *)instruction_pointer(regs)) ?: 1;
 		if (!event->pending_sigtrap) {
 			event->pending_sigtrap = pending_id;
-			local_inc(&event->ctx->nr_pending);
+			local_inc(&event->ctx->nr_no_switch_fast);
 		} else if (event->attr.exclude_kernel && valid_sample) {
 			/*
 			 * Should not be able to return to user space without
-- 
2.45.2

