Message-ID: <20230612093540.407316252@infradead.org>
Date: Mon, 12 Jun 2023 11:07:53 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: torvalds@...ux-foundation.org, keescook@...omium.org,
gregkh@...uxfoundation.org, pbonzini@...hat.com
Cc: masahiroy@...nel.org, nathan@...nel.org, ndesaulniers@...gle.com,
nicolas@...sle.eu, catalin.marinas@....com, will@...nel.org,
vkoul@...nel.org, trix@...hat.com, ojeda@...nel.org,
peterz@...radead.org, mingo@...hat.com, longman@...hat.com,
boqun.feng@...il.com, dennis@...nel.org, tj@...nel.org,
cl@...ux.com, acme@...nel.org, mark.rutland@....com,
alexander.shishkin@...ux.intel.com, jolsa@...nel.org,
namhyung@...nel.org, irogers@...gle.com, adrian.hunter@...el.com,
juri.lelli@...hat.com, vincent.guittot@...aro.org,
dietmar.eggemann@....com, rostedt@...dmis.org, bsegall@...gle.com,
mgorman@...e.de, bristot@...hat.com, vschneid@...hat.com,
paulmck@...nel.org, frederic@...nel.org, quic_neeraju@...cinc.com,
joel@...lfernandes.org, josh@...htriplett.org,
mathieu.desnoyers@...icios.com, jiangshanlai@...il.com,
rientjes@...gle.com, vbabka@...e.cz, roman.gushchin@...ux.dev,
42.hyeyoo@...il.com, apw@...onical.com, joe@...ches.com,
dwaipayanray1@...il.com, lukas.bulwahn@...il.com,
john.johansen@...onical.com, paul@...l-moore.com,
jmorris@...ei.org, serge@...lyn.com, linux-kbuild@...r.kernel.org,
linux-kernel@...r.kernel.org, dmaengine@...r.kernel.org,
llvm@...ts.linux.dev, linux-perf-users@...r.kernel.org,
rcu@...r.kernel.org, linux-security-module@...r.kernel.org,
tglx@...utronix.de, ravi.bangoria@....com, error27@...il.com,
luc.vanoostenryck@...il.com
Subject: [PATCH v3 40/57] perf: Simplify perf_mmap_close()/perf_aux_sample_output()

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
kernel/events/core.c | 20 ++++++++------------
1 file changed, 8 insertions(+), 12 deletions(-)

--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6179,6 +6179,9 @@ void ring_buffer_put(struct perf_buffer
call_rcu(&rb->rcu_head, rb_free_rcu);
}

+DEFINE_CLASS(ring_buffer_get, struct perf_buffer *, ring_buffer_put(_T),
+ ring_buffer_get(event), struct perf_event *event)
+
static void perf_mmap_open(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;
@@ -6206,7 +6209,7 @@ static void perf_pmu_output_stop(struct
static void perf_mmap_close(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;
- struct perf_buffer *rb = ring_buffer_get(event);
+ CLASS(ring_buffer_get, rb)(event);
struct user_struct *mmap_user = rb->mmap_user;
int mmap_locked = rb->mmap_locked;
unsigned long size = perf_data_size(rb);
@@ -6245,14 +6248,14 @@ static void perf_mmap_close(struct vm_ar
detach_rest = true;

if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
- goto out_put;
+ return;

ring_buffer_attach(event, NULL);
mutex_unlock(&event->mmap_mutex);

/* If there's still other mmap()s of this buffer, we're done. */
if (!detach_rest)
- goto out_put;
+ return;

/*
* No other mmap()s, detach from all other events that might redirect
@@ -6309,9 +6312,6 @@ static void perf_mmap_close(struct vm_ar
&mmap_user->locked_vm);
atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm);
free_uid(mmap_user);
-
-out_put:
- ring_buffer_put(rb); /* could be last */
}

static const struct vm_operations_struct perf_mmap_vmops = {
@@ -6962,14 +6962,13 @@ static void perf_aux_sample_output(struc
struct perf_sample_data *data)
{
struct perf_event *sampler = event->aux_event;
- struct perf_buffer *rb;
unsigned long pad;
long size;

if (WARN_ON_ONCE(!sampler || !data->aux_size))
return;

- rb = ring_buffer_get(sampler);
+ CLASS(ring_buffer_get, rb)(sampler);
if (!rb)
return;

@@ -6982,7 +6981,7 @@ static void perf_aux_sample_output(struc
* like to know.
*/
if (WARN_ON_ONCE(size < 0))
- goto out_put;
+ return;

/*
* The pad comes from ALIGN()ing data->aux_size up to u64 in
@@ -6996,9 +6995,6 @@ static void perf_aux_sample_output(struc
u64 zero = 0;
perf_output_copy(handle, &zero, pad);
}
-
-out_put:
- ring_buffer_put(rb);
}

/*
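
For readers not following the rest of the series: as used above, DEFINE_CLASS(ring_buffer_get, ...) declares a "class" whose instances are created by ring_buffer_get(event) and automatically passed to ring_buffer_put() when they go out of scope, and CLASS(ring_buffer_get, rb)(event) declares such an instance as a local variable. That is what lets the out_put: labels and the explicit ring_buffer_put() calls become plain returns. Below is a minimal userspace sketch of the underlying compiler mechanism, __attribute__((cleanup)); the struct and helper names are made up for illustration and this is not the kernel's cleanup.h implementation.

#include <stdio.h>

/* Illustrative refcounted object; not a perf_buffer. */
struct buf { int refs; };

static struct buf *buf_get(struct buf *b) { b->refs++; return b; }
static void buf_put(struct buf *b) { if (b) b->refs--; }

/* Runs automatically when a variable annotated with cleanup() leaves scope. */
static void buf_cleanup(struct buf **p) { buf_put(*p); }

/* Rough analogue of CLASS(name, var)(init_args): declare + init + auto-put. */
#define BUF_CLASS(var, init) \
	struct buf *var __attribute__((cleanup(buf_cleanup))) = (init)

int main(void)
{
	struct buf b = { .refs = 1 };

	{
		BUF_CLASS(rb, buf_get(&b));		/* takes a reference */
		printf("in scope:     refs=%d\n", rb->refs);	/* 2 */
		/* any early return/exit from this scope still drops the ref */
	}					/* buf_cleanup() runs here */

	printf("out of scope: refs=%d\n", b.refs);		/* 1 */
	return 0;
}

The point of the pattern, and of the conversions in this patch, is that every exit path from the scope drops the reference, so error handling no longer needs a shared goto label.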