Message-Id: <ea1be1160ae17a5348560a5825960f517825e76b.1501576497.git.naveen.n.rao@linux.vnet.ibm.com>
Date: Tue, 1 Aug 2017 20:14:03 +0530
From: "Naveen N. Rao" <naveen.n.rao@...ux.vnet.ibm.com>
To: Peter Zijlstra <peterz@...radead.org>,
Jiri Olsa <jolsa@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Ingo Molnar <mingo@...nel.org>,
Vince Weaver <vincent.weaver@...ne.edu>
Cc: linux-kernel@...r.kernel.org
Subject: [PATCH v2 1/2] kernel/events: Add option to notify through signals on wakeup
Add a new option 'signal_on_wakeup' to request that a signal be
delivered on ring buffer wakeup, as controlled through the 'watermark'
flag and {wakeup_events, wakeup_watermark}. POLL_HUP is signaled on
exit.

Setting signal_on_wakeup disables use of IOC_REFRESH to control signal
delivery; signal delivery is instead controlled through
IOC_ENABLE/IOC_DISABLE.
Signed-off-by: Naveen N. Rao <naveen.n.rao@...ux.vnet.ibm.com>
---
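Not part of the patch itself: a rough, untested user-space sketch of
how this could be consumed, assuming delivery through the existing perf
fasync path (SIGIO routed via O_ASYNC/F_SETSIG/F_SETOWN on the event
fd). The event type, sample period and wakeup_events below are only
placeholders, and building it needs the updated uapi header for
attr.signal_on_wakeup.

#define _GNU_SOURCE			/* for F_SETSIG */
#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>

static void sigio_handler(int sig, siginfo_t *info, void *uc)
{
	/*
	 * si_code is POLL_IN when the ring buffer crossed the wakeup
	 * threshold, and POLL_HUP once the monitored task has exited.
	 */
}

int main(void)
{
	struct perf_event_attr attr;
	struct sigaction sa;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;	/* placeholder event */
	attr.sample_period = 100000;			/* placeholder period */
	attr.wakeup_events = 1;				/* wake up per sample */
	attr.signal_on_wakeup = 1;			/* new flag */
	attr.disabled = 1;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = sigio_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGIO, &sa, NULL);

	/* self-monitoring: pid == 0, any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* route wakeups to SIGIO with siginfo carrying POLL_IN/POLL_HUP */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
	fcntl(fd, F_SETSIG, SIGIO);
	fcntl(fd, F_SETOWN, getpid());

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... mmap the ring buffer, do work, drain samples from the handler ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	close(fd);
	return 0;
}

With signal_on_wakeup set, IOC_REFRESH on such an event returns
-EINVAL, so the enable/disable ioctls above are the only way to gate
signal delivery.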
 include/uapi/linux/perf_event.h |  3 ++-
 kernel/events/core.c            | 22 ++++++++++++++--------
 kernel/events/ring_buffer.c     |  3 +++
 3 files changed, 19 insertions(+), 9 deletions(-)
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index b1c0b187acfe..e5810b1d74a4 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -345,7 +345,8 @@ struct perf_event_attr {
 				context_switch :  1, /* context switch data */
 				write_backward :  1, /* Write ring buffer from end to beginning */
 				namespaces     :  1, /* include namespaces data */
-				__reserved_1   : 35;
+				signal_on_wakeup : 1, /* send signal on wakeup */
+				__reserved_1   : 34;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 426c2ffba16d..4fe708a4fdee 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2680,9 +2680,11 @@ EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
 static int _perf_event_refresh(struct perf_event *event, int refresh)
 {
 	/*
-	 * not supported on inherited events
+	 * not supported on inherited events or if the user has requested
+	 * signals on ring buffer wakeup.
 	 */
-	if (event->attr.inherit || !is_sampling_event(event))
+	if (event->attr.inherit || event->attr.signal_on_wakeup ||
+	    !is_sampling_event(event))
 		return -EINVAL;
 
 	atomic_add(refresh, &event->event_limit);
@@ -7341,7 +7343,6 @@ static int __perf_event_overflow(struct perf_event *event,
 				   int throttle, struct perf_sample_data *data,
 				   struct pt_regs *regs)
 {
-	int events = atomic_read(&event->event_limit);
 	int ret = 0;
 
 	/*
@@ -7358,12 +7359,15 @@ static int __perf_event_overflow(struct perf_event *event,
 	 * events
 	 */
 
-	event->pending_kill = POLL_IN;
-	if (events && atomic_dec_and_test(&event->event_limit)) {
-		ret = 1;
-		event->pending_kill = POLL_HUP;
+	if (!event->attr.signal_on_wakeup) {
+		int events = atomic_read(&event->event_limit);
+		event->pending_kill = POLL_IN;
+		if (events && atomic_dec_and_test(&event->event_limit)) {
+			ret = 1;
+			event->pending_kill = POLL_HUP;
 
-		perf_event_disable_inatomic(event);
+			perf_event_disable_inatomic(event);
+		}
 	}
 
 	READ_ONCE(event->overflow_handler)(event, data, regs);
@@ -10427,6 +10431,8 @@ perf_event_exit_event(struct perf_event *child_event,
 	perf_group_detach(child_event);
 	list_del_event(child_event, child_ctx);
 	child_event->state = PERF_EVENT_STATE_EXIT; /* is_event_hup() */
+	if (child_event->attr.signal_on_wakeup)
+		child_event->pending_kill = POLL_HUP;
 	raw_spin_unlock_irq(&child_ctx->lock);
 
 	/*
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index ee97196bb151..e7a558cfcadb 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -21,6 +21,9 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
 {
 	atomic_set(&handle->rb->poll, POLLIN);
 
+	if (handle->event->attr.signal_on_wakeup)
+		handle->event->pending_kill = POLL_IN;
+
 	handle->event->pending_wakeup = 1;
 	irq_work_queue(&handle->event->pending);
 }
--
2.13.3