Message-Id: <3e7dabc8a4762aad3836244661f843dda689588b.1501576497.git.naveen.n.rao@linux.vnet.ibm.com>
Date: Tue, 1 Aug 2017 20:14:04 +0530
From: "Naveen N. Rao" <naveen.n.rao@...ux.vnet.ibm.com>
To: Peter Zijlstra <peterz@...radead.org>,
Jiri Olsa <jolsa@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Ingo Molnar <mingo@...nel.org>,
Vince Weaver <vincent.weaver@...ne.edu>
Cc: linux-kernel@...r.kernel.org
Subject: [PATCH v2 2/2] kernel/events: Add option to enable counting sideband events in wakeup_events

Many sideband events are interesting by themselves. When profiling only
for sideband events, it is useful to be able to control process wakeup
(wakeup_events) based on sideband events alone. Add a new option
'count_sb_events' to enable this.

IOC_REFRESH is not supported with this option, so disallow it.

Signed-off-by: Naveen N. Rao <naveen.n.rao@...ux.vnet.ibm.com>
---
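A rough sketch of the intended userspace usage (illustrative only, not
part of this patch): it assumes a uapi header carrying the new
count_sb_events bit, and the helper name, dummy event, context_switch
selection and wakeup_events value below are just examples.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

/*
 * Illustrative sketch: open a dummy software event that emits only
 * sideband (context switch) records and asks to be woken up every
 * 10 such records. Relies on the count_sb_events bit added here.
 */
static int open_sideband_only(pid_t pid)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_SOFTWARE,
		.size		= sizeof(attr),
		.config		= PERF_COUNT_SW_DUMMY,
		.context_switch	= 1,	/* generate sideband records */
		.count_sb_events = 1,	/* count them towards wakeup_events */
		.wakeup_events	= 10,	/* wake the profiler every 10 records */
	};

	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}

The returned fd would then be mmap()ed and poll()ed as usual, with the
wakeups now driven by sideband records alone.
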
 include/uapi/linux/perf_event.h |  3 ++-
 kernel/events/core.c            | 23 +++++++----------------
 kernel/events/internal.h        |  1 +
 kernel/events/ring_buffer.c     | 18 ++++++++++++++++++
 4 files changed, 28 insertions(+), 17 deletions(-)

diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index e5810b1d74a4..ab4dc9a02151 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -346,7 +346,8 @@ struct perf_event_attr {
 				write_backward :  1, /* Write ring buffer from end to beginning */
 				namespaces     :  1, /* include namespaces data */
 				signal_on_wakeup :  1, /* send signal on wakeup */
-				__reserved_1   : 34;
+				count_sb_events :  1, /* wakeup_events also counts sideband events */
+				__reserved_1   : 33;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4fe708a4fdee..118a100108b1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2680,11 +2680,13 @@ EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
 static int _perf_event_refresh(struct perf_event *event, int refresh)
 {
 	/*
-	 * not supported on inherited events or if user has requested for
-	 * signals on ring buffer wakeup.
+	 * not supported on:
+	 * - inherited events
+	 * - if user has requested for signals on ring buffer wakeup, or
+	 * - if counting sideband events
 	 */
 	if (event->attr.inherit || event->attr.signal_on_wakeup ||
-	    !is_sampling_event(event))
+	    event->attr.count_sb_events || !is_sampling_event(event))
 		return -EINVAL;
 
 	atomic_add(refresh, &event->event_limit);
@@ -5974,19 +5976,8 @@ void perf_output_sample(struct perf_output_handle *handle,
 		}
 	}
 
-	if (!event->attr.watermark) {
-		int wakeup_events = event->attr.wakeup_events;
-
-		if (wakeup_events) {
-			struct ring_buffer *rb = handle->rb;
-			int events = local_inc_return(&rb->events);
-
-			if (events >= wakeup_events) {
-				local_sub(wakeup_events, &rb->events);
-				local_inc(&rb->wakeup);
-			}
-		}
-	}
+	if (!event->attr.count_sb_events)
+		rb_handle_wakeup_events(event, handle->rb);
 }
 
 void perf_prepare_sample(struct perf_event_header *header,
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 486fd78eb8d5..b75137ebe9f7 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -81,6 +81,7 @@ extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
 extern void rb_free_aux(struct ring_buffer *rb);
 extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
 extern void ring_buffer_put(struct ring_buffer *rb);
+extern void rb_handle_wakeup_events(struct perf_event *event, struct ring_buffer *rb);
 
 static inline bool rb_has_aux(struct ring_buffer *rb)
 {
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index e7a558cfcadb..a34f5c2e7ed1 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -116,6 +116,21 @@ ring_buffer_has_space(unsigned long head, unsigned long tail,
 		return CIRC_SPACE(tail, head, data_size) >= size;
 }
 
+void __always_inline
+rb_handle_wakeup_events(struct perf_event *event, struct ring_buffer *rb)
+{
+	int wakeup_events = event->attr.wakeup_events;
+
+	if (!event->attr.watermark && wakeup_events) {
+		int events = local_inc_return(&rb->events);
+
+		if (events >= wakeup_events) {
+			local_sub(wakeup_events, &rb->events);
+			local_inc(&rb->wakeup);
+		}
+	}
+}
+
 static int __always_inline
 __perf_output_begin(struct perf_output_handle *handle,
 		    struct perf_event *event, unsigned int size,
@@ -197,6 +212,9 @@ __perf_output_begin(struct perf_output_handle *handle,
 	 * none of the data stores below can be lifted up by the compiler.
 	 */
 
+	if (unlikely(event->attr.count_sb_events))
+		rb_handle_wakeup_events(event, rb);
+
 	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
 		local_add(rb->watermark, &rb->wakeup);
 
--
2.13.3