lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1413207948-28202-21-git-send-email-alexander.shishkin@linux.intel.com>
Date:	Mon, 13 Oct 2014 16:45:48 +0300
From:	Alexander Shishkin <alexander.shishkin@...ux.intel.com>
To:	Peter Zijlstra <a.p.zijlstra@...llo.nl>
Cc:	Ingo Molnar <mingo@...hat.com>, linux-kernel@...r.kernel.org,
	Robert Richter <rric@...nel.org>,
	Frederic Weisbecker <fweisbec@...il.com>,
	Mike Galbraith <efault@....de>,
	Paul Mackerras <paulus@...ba.org>,
	Stephane Eranian <eranian@...gle.com>,
	Andi Kleen <ak@...ux.intel.com>, kan.liang@...el.com,
	adrian.hunter@...el.com, acme@...radead.org,
	Alexander Shishkin <alexander.shishkin@...ux.intel.com>
Subject: [PATCH v5 20/20] perf: Allow AUX sampling of inherited events

Try to find an AUX sampler event for the current event if none is linked
via event::sampler.

This is useful when these events (the one that is being sampled and the
one providing sample annotation) are allocated via the inheritance path,
independently of one another, and the latter is not yet referenced by the
former's event::sampler.

Signed-off-by: Alexander Shishkin <alexander.shishkin@...ux.intel.com>
---
 kernel/events/core.c | 94 ++++++++++++++++++++++++++++++++++------------------
 1 file changed, 62 insertions(+), 32 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index ad7b1e92dd..02fcd84b0f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4658,47 +4658,67 @@ static bool perf_aux_event_match(struct perf_event *e1, struct perf_event *e2)
 	return has_aux(e1) && exclusive_event_match(e1, e2);
 }
 
+struct perf_event *__find_sampling_counter(struct perf_event_context *ctx,
+					   struct perf_event *event,
+					   struct task_struct *task)
+{
+	struct perf_event *sampler = NULL;
+
+	list_for_each_entry(sampler, &ctx->event_list, event_entry) {
+		/*
+		 * event is not an itrace event, but all the relevant
+		 * bits should match
+		 */
+		if (perf_aux_event_match(sampler, event) &&
+		    kernel_rb_event(sampler) &&
+		    sampler->attr.type == event->attr.aux_sample_type &&
+		    sampler->attr.config == event->attr.aux_sample_config &&
+		    sampler->attr.exclude_hv == event->attr.exclude_hv &&
+		    sampler->attr.exclude_idle == event->attr.exclude_idle &&
+		    sampler->attr.exclude_user == event->attr.exclude_user &&
+		    sampler->attr.exclude_kernel == event->attr.exclude_kernel &&
+		    sampler->attr.aux_sample_size >= event->attr.aux_sample_size &&
+		    atomic_long_inc_not_zero(&sampler->refcount))
+			return sampler;
+	}
+
+	return NULL;
+}
+
+struct perf_event *find_sampling_counter(struct pmu *pmu,
+					 struct perf_event *event,
+					 struct task_struct *task)
+{
+	struct perf_event_context *ctx;
+	struct perf_event *sampler = NULL;
+	unsigned long flags;
+
+	ctx = find_get_context(pmu, task, event->cpu);
+	if (!ctx)
+		return NULL;
+
+	raw_spin_lock_irqsave(&ctx->lock, flags);
+	sampler = __find_sampling_counter(ctx, event, task);
+	--ctx->pin_count;
+	put_ctx(ctx);
+	raw_spin_unlock_irqrestore(&ctx->lock, flags);
+
+	return sampler;
+}
+
 static int perf_aux_sampler_init(struct perf_event *event,
 				 struct task_struct *task,
 				 struct pmu *pmu)
 {
-	struct perf_event_context *ctx;
 	struct perf_event_attr attr;
-	struct perf_event *sampler = NULL;
+	struct perf_event *sampler;
 	struct ring_buffer *rb;
-	unsigned long nr_pages, flags;
+	unsigned long nr_pages;
 
 	if (!pmu || !(pmu->setup_aux))
 		return -ENOTSUPP;
 
-	ctx = find_get_context(pmu, task, event->cpu);
-	if (ctx) {
-		raw_spin_lock_irqsave(&ctx->lock, flags);
-		list_for_each_entry(sampler, &ctx->event_list, event_entry) {
-			/*
-			 * event is not an aux event, but all the relevant
-			 * bits should match
-			 */
-			if (perf_aux_event_match(sampler, event) &&
-			    sampler->attr.type == event->attr.aux_sample_type &&
-			    sampler->attr.config == event->attr.aux_sample_config &&
-			    sampler->attr.exclude_hv == event->attr.exclude_hv &&
-			    sampler->attr.exclude_idle == event->attr.exclude_idle &&
-			    sampler->attr.exclude_user == event->attr.exclude_user &&
-			    sampler->attr.exclude_kernel == event->attr.exclude_kernel &&
-			    sampler->attr.aux_sample_size >= event->attr.aux_sample_size &&
-			    atomic_long_inc_not_zero(&sampler->refcount))
-				goto got_event;
-		}
-
-		sampler = NULL;
-
-got_event:
-		--ctx->pin_count;
-		put_ctx(ctx);
-		raw_spin_unlock_irqrestore(&ctx->lock, flags);
-	}
-
+	sampler = find_sampling_counter(pmu, event, task);
 	if (!sampler) {
 		memset(&attr, 0, sizeof(attr));
 		attr.type = pmu->type;
@@ -4749,9 +4769,19 @@ static void perf_aux_sampler_fini(struct perf_event *event)
 static unsigned long perf_aux_sampler_trace(struct perf_event *event,
 					    struct perf_sample_data *data)
 {
-	struct perf_event *sampler = event->sampler;
+	struct perf_event *sampler;
 	struct ring_buffer *rb;
 
+	if (!event->sampler) {
+		/*
+		 * down this path, event->ctx is already locked IF it's the
+		 * same context
+		 */
+		event->sampler = __find_sampling_counter(event->ctx, event,
+							 event->ctx->task);
+	}
+
+	sampler = event->sampler;
 	if (!sampler || sampler->state != PERF_EVENT_STATE_ACTIVE) {
 		data->aux.size = 0;
 		goto out;
-- 
2.1.0

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ