Message-ID: <20230612093541.454144142@infradead.org>
Date:   Mon, 12 Jun 2023 11:08:07 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     torvalds@...ux-foundation.org, keescook@...omium.org,
        gregkh@...uxfoundation.org, pbonzini@...hat.com
Cc:     masahiroy@...nel.org, nathan@...nel.org, ndesaulniers@...gle.com,
        nicolas@...sle.eu, catalin.marinas@....com, will@...nel.org,
        vkoul@...nel.org, trix@...hat.com, ojeda@...nel.org,
        peterz@...radead.org, mingo@...hat.com, longman@...hat.com,
        boqun.feng@...il.com, dennis@...nel.org, tj@...nel.org,
        cl@...ux.com, acme@...nel.org, mark.rutland@....com,
        alexander.shishkin@...ux.intel.com, jolsa@...nel.org,
        namhyung@...nel.org, irogers@...gle.com, adrian.hunter@...el.com,
        juri.lelli@...hat.com, vincent.guittot@...aro.org,
        dietmar.eggemann@....com, rostedt@...dmis.org, bsegall@...gle.com,
        mgorman@...e.de, bristot@...hat.com, vschneid@...hat.com,
        paulmck@...nel.org, frederic@...nel.org, quic_neeraju@...cinc.com,
        joel@...lfernandes.org, josh@...htriplett.org,
        mathieu.desnoyers@...icios.com, jiangshanlai@...il.com,
        rientjes@...gle.com, vbabka@...e.cz, roman.gushchin@...ux.dev,
        42.hyeyoo@...il.com, apw@...onical.com, joe@...ches.com,
        dwaipayanray1@...il.com, lukas.bulwahn@...il.com,
        john.johansen@...onical.com, paul@...l-moore.com,
        jmorris@...ei.org, serge@...lyn.com, linux-kbuild@...r.kernel.org,
        linux-kernel@...r.kernel.org, dmaengine@...r.kernel.org,
        llvm@...ts.linux.dev, linux-perf-users@...r.kernel.org,
        rcu@...r.kernel.org, linux-security-module@...r.kernel.org,
        tglx@...utronix.de, ravi.bangoria@....com, error27@...il.com,
        luc.vanoostenryck@...il.com
Subject: [PATCH v3 54/57] perf: Misc cleanups

Use the scope-based cleanup helpers to convert a number of open-coded
lock/unlock and perf_pmu_disable()/perf_pmu_enable() pairs into
guard(), scoped_guard() and CLASS() scopes, removing the explicit
unwind paths.

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
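Not part of the patch, for reference only: the guard(perf_pmu_disable)
and guard(perf_ctx_lock) conversions rely on the scope-based cleanup
helpers from <linux/cleanup.h> introduced earlier in this series. The
actual guard definitions live in other patches and may differ, but a
perf_pmu_disable guard could be declared roughly as below, with
perf_pmu_enable() running automatically on every scope exit:

/* Illustrative sketch only; the real definition is part of the series. */
#include <linux/cleanup.h>

DEFINE_GUARD(perf_pmu_disable, struct pmu *,
	     perf_pmu_disable(_T), perf_pmu_enable(_T))

static void example(struct pmu *pmu)
{
	guard(perf_pmu_disable)(pmu);	/* perf_pmu_disable(pmu) runs here */

	/* ... work with the PMU disabled ... */

}					/* perf_pmu_enable(pmu) runs here,
					 * on any return path out of the scope */
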
 kernel/events/core.c |   64 +++++++++++++++++++--------------------------------
 1 file changed, 25 insertions(+), 39 deletions(-)
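Also worth noting (again not part of the patch): the "goto again"
inside the scoped_guard (rcu) block in the first hunk still drops the
RCU read lock, because the compiler runs the guard's cleanup whenever
the guard variable goes out of scope, including via goto. A small,
standalone userspace demonstration of that property of
__attribute__((cleanup)), which is the mechanism underneath
guard()/scoped_guard() (plain C with GCC/Clang, not kernel code):

/* Userspace illustration: cleanup handlers run even when a goto
 * jumps out of the enclosing scope.
 */
#include <stdio.h>

static void unlock(int *token)
{
	printf("cleanup runs (token=%d)\n", *token);
}

int main(void)
{
	int tries = 0;
again:
	{
		__attribute__((cleanup(unlock))) int token = tries;

		if (++tries < 3)
			goto again;	/* unlock(&token) still runs here */
	}
	printf("done after %d tries\n", tries);
	return 0;
}
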

--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1274,13 +1274,11 @@ perf_event_ctx_lock_nested(struct perf_e
 	struct perf_event_context *ctx;
 
 again:
-	rcu_read_lock();
-	ctx = READ_ONCE(event->ctx);
-	if (!refcount_inc_not_zero(&ctx->refcount)) {
-		rcu_read_unlock();
-		goto again;
+	scoped_guard (rcu) {
+		ctx = READ_ONCE(event->ctx);
+		if (!refcount_inc_not_zero(&ctx->refcount))
+			goto again;
 	}
-	rcu_read_unlock();
 
 	mutex_lock_nested(&ctx->mutex, nesting);
 	if (event->ctx != ctx) {
@@ -2254,7 +2252,7 @@ event_sched_out(struct perf_event *event
 	 */
 	list_del_init(&event->active_list);
 
-	perf_pmu_disable(event->pmu);
+	guard(perf_pmu_disable)(event->pmu);
 
 	event->pmu->del(event, 0);
 	event->oncpu = -1;
@@ -2288,8 +2286,6 @@ event_sched_out(struct perf_event *event
 		ctx->nr_freq--;
 	if (event->attr.exclusive || !cpc->active_oncpu)
 		cpc->exclusive = 0;
-
-	perf_pmu_enable(event->pmu);
 }
 
 static void
@@ -3219,7 +3215,8 @@ static void __pmu_ctx_sched_out(struct p
 	if (!event_type)
 		return;
 
-	perf_pmu_disable(pmu);
+	guard(perf_pmu_disable)(pmu);
+
 	if (event_type & EVENT_PINNED) {
 		list_for_each_entry_safe(event, tmp,
 					 &pmu_ctx->pinned_active,
@@ -3239,7 +3236,6 @@ static void __pmu_ctx_sched_out(struct p
 		 */
 		pmu_ctx->rotate_necessary = 0;
 	}
-	perf_pmu_enable(pmu);
 }
 
 static void
@@ -3586,13 +3582,10 @@ static void __perf_pmu_sched_task(struct
 	if (WARN_ON_ONCE(!pmu->sched_task))
 		return;
 
-	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
-	perf_pmu_disable(pmu);
+	guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);
+	guard(perf_pmu_disable)(pmu);
 
 	pmu->sched_task(cpc->task_epc, sched_in);
-
-	perf_pmu_enable(pmu);
-	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 }
 
 static void perf_pmu_sched_task(struct task_struct *prev,
@@ -12655,8 +12648,6 @@ static void __perf_pmu_install_event(str
 				     struct perf_event_context *ctx,
 				     int cpu, struct perf_event *event)
 {
-	struct perf_event_pmu_context *epc;
-
 	/*
 	 * Now that the events are unused, put their old ctx and grab a
 	 * reference on the new context.
@@ -12665,8 +12656,7 @@ static void __perf_pmu_install_event(str
 	get_ctx(ctx);
 
 	event->cpu = cpu;
-	epc = find_get_pmu_context(pmu, ctx, event);
-	event->pmu_ctx = epc;
+	event->pmu_ctx = find_get_pmu_context(pmu, ctx, event);
 
 	if (event->state >= PERF_EVENT_STATE_OFF)
 		event->state = PERF_EVENT_STATE_INACTIVE;
@@ -12815,12 +12805,12 @@ perf_event_exit_event(struct perf_event
 
 static void perf_event_exit_task_context(struct task_struct *child)
 {
-	struct perf_event_context *child_ctx, *clone_ctx = NULL;
+	struct perf_event_context *clone_ctx = NULL;
 	struct perf_event *child_event, *next;
 
 	WARN_ON_ONCE(child != current);
 
-	child_ctx = perf_pin_task_context(child);
+	CLASS(pin_task_ctx, child_ctx)(child);
 	if (!child_ctx)
 		return;
 
@@ -12834,27 +12824,27 @@ static void perf_event_exit_task_context
 	 * without ctx::mutex (it cannot because of the move_group double mutex
 	 * lock thing). See the comments in perf_install_in_context().
 	 */
-	mutex_lock(&child_ctx->mutex);
+	guard(mutex)(&child_ctx->mutex);
 
 	/*
 	 * In a single ctx::lock section, de-schedule the events and detach the
 	 * context from the task such that we cannot ever get it scheduled back
 	 * in.
 	 */
-	raw_spin_lock_irq(&child_ctx->lock);
-	task_ctx_sched_out(child_ctx, EVENT_ALL);
+	scoped_guard (raw_spinlock_irq, &child_ctx->lock) {
+		task_ctx_sched_out(child_ctx, EVENT_ALL);
 
-	/*
-	 * Now that the context is inactive, destroy the task <-> ctx relation
-	 * and mark the context dead.
-	 */
-	RCU_INIT_POINTER(child->perf_event_ctxp, NULL);
-	put_ctx(child_ctx); /* cannot be last */
-	WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
-	put_task_struct(current); /* cannot be last */
+		/*
+		 * Now that the context is inactive, destroy the task <-> ctx
+		 * relation and mark the context dead.
+		 */
+		RCU_INIT_POINTER(child->perf_event_ctxp, NULL);
+		put_ctx(child_ctx); /* cannot be last */
+		WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
+		put_task_struct(current); /* cannot be last */
 
-	clone_ctx = unclone_ctx(child_ctx);
-	raw_spin_unlock_irq(&child_ctx->lock);
+		clone_ctx = unclone_ctx(child_ctx);
+	}
 
 	if (clone_ctx)
 		put_ctx(clone_ctx);
@@ -12868,10 +12858,6 @@ static void perf_event_exit_task_context
 
 	list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
 		perf_event_exit_event(child_event, child_ctx);
-
-	mutex_unlock(&child_ctx->mutex);
-
-	put_ctx(child_ctx);
 }
 
 /*

