Date:	Sun,  7 Feb 2010 12:30:55 +0100
From:	highguy@...il.com
To:	mingo@...e.hu, linux-kernel@...r.kernel.org
Cc:	torvalds@...ux-foundation.org, efault@....de,
	a.p.zijlstra@...llo.nl, andrea@...e.de, tglx@...utronix.de,
	akpm@...ux-foundation.org, peterz@...radead.org,
	Stijn Devriendt <stijn@...jn.telenet.be>
Subject: [PATCH 2/6] Allow min/max thresholds for wakeups

From: Stijn Devriendt <stijn@...jn.telenet.be>

Add an optional min/max threshold pair to struct perf_event_attr.
When attr.threshold is set, poll() reports POLLIN while the event
count is below min_threshold and stops reporting it once the count
exceeds max_threshold; output wakeups are likewise suppressed while
the count is above max_threshold.  Threshold events cannot be
combined with freq or watermark, reject PERF_EVENT_IOC_PERIOD, are
skipped by frequency adjustment, and always use a sample period of 1.
The unused __reserved_2 field is dropped so that the wakeup union can
grow to hold the 64-bit min_threshold without changing the offsets of
the fields that follow.  (A hypothetical usage sketch follows the
diffstat below.)
---
 include/linux/perf_event.h |    5 +++--
 kernel/perf_event.c        |   28 ++++++++++++++++++++++++----
 2 files changed, 27 insertions(+), 6 deletions(-)
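
Not part of the patch, for illustration only: a minimal userspace
sketch of how a consumer might use the new attributes, assuming
headers and a kernel with this series applied.  The event type,
pid/cpu arguments and threshold values are arbitrary, and the sketch
elides error handling and any ring-buffer mmap the wakeup path may
need.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>
#include <poll.h>

int main(void)
{
	struct perf_event_attr attr;
	struct pollfd pfd;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;	/* arbitrary example */
	attr.threshold = 1;		/* new bit: enable threshold mode */
	attr.min_threshold = 10;	/* POLLIN reported while count < 10 */
	attr.max_threshold = 20;	/* POLLIN masked once count > 20 */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	pfd.fd = fd;
	pfd.events = POLLIN;
	poll(&pfd, 1, -1);	/* wakes according to perf_poll() above */

	close(fd);
	return 0;
}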

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 827a221..0fa235e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -183,6 +183,7 @@ struct perf_event_attr {
 	union {
 		__u64		sample_period;
 		__u64		sample_freq;
+		__u64		max_threshold;	  /* maximum threshold     */
 	};
 
 	__u64			sample_type;
@@ -203,16 +204,16 @@ struct perf_event_attr {
 				enable_on_exec :  1, /* next exec enables     */
 				task           :  1, /* trace fork/exit       */
 				watermark      :  1, /* wakeup_watermark      */
+				threshold      :  1, /* thresholds            */
 
-				__reserved_1   : 49;
+				__reserved_1   : 48;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
 		__u32		wakeup_watermark; /* bytes before wakeup   */
+		__u64		min_threshold;	  /* minimum threshold     */
 	};
 
-	__u32			__reserved_2;
-
 	__u64			bp_addr;
 	__u32			bp_type;
 	__u32			bp_len;
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 42dc18d..70ca6e1 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1401,6 +1401,8 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 		if (!event->attr.freq || !event->attr.sample_freq)
 			continue;
 
+		if (event->attr.threshold)
+			continue;
 		/*
 		 * if the specified freq < HZ then we need to skip ticks
 		 */
@@ -1921,6 +1923,15 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 	struct perf_event *event = file->private_data;
 	unsigned int events = atomic_xchg(&event->poll, 0);
 
+	if (event->attr.threshold) {
+		u64 count = atomic64_read(&event->count);
+
+		if (count < event->attr.min_threshold)
+			events |= POLLIN;
+		else if (count > event->attr.max_threshold)
+			events &= ~POLLIN;
+	}
+
 	poll_wait(file, &event->waitq, wait);
 
 	return events;
@@ -1979,6 +1990,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	if (!event->attr.sample_period)
 		return -EINVAL;
 
+	if (event->attr.threshold)
+		return -EINVAL;
+
 	size = copy_from_user(&value, arg, sizeof(value));
 	if (size != sizeof(value))
 		return -EFAULT;
@@ -2675,6 +2689,9 @@ static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
 
 static void __perf_output_wakeup(struct perf_event* event, int nmi)
 {
+	if (event->attr.threshold &&
+	    atomic64_read(&event->count) > event->attr.max_threshold)
+		return;
 	atomic_set(&event->poll, POLLIN);
 
 	if (nmi) {
@@ -2895,7 +2912,7 @@ void perf_output_end(struct perf_output_handle *handle)
 	struct perf_event *event = handle->event;
 	struct perf_mmap_data *data = handle->data;
 
-	int wakeup_events = event->attr.wakeup_events;
+	int wakeup_events = event->attr.threshold ? 1 : event->attr.wakeup_events;
 
 	if (handle->sample && wakeup_events) {
 		int events = atomic_inc_return(&data->events);
@@ -4445,7 +4462,7 @@ perf_event_alloc(struct perf_event_attr *attr,
 
 	hwc = &event->hw;
 	hwc->sample_period = attr->sample_period;
-	if (attr->freq && attr->sample_freq)
+	if (attr->threshold || (attr->freq && attr->sample_freq))
 		hwc->sample_period = 1;
 	hwc->last_period = hwc->sample_period;
 
 
@@ -4571,7 +4588,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 	if (attr->type >= PERF_TYPE_MAX)
 		return -EINVAL;
 
-	if (attr->__reserved_1 || attr->__reserved_2)
+	if (attr->__reserved_1)
 		return -EINVAL;
 
 	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
@@ -4674,6 +4691,9 @@ SYSCALL_DEFINE5(perf_event_open,
 			return -EACCES;
 	}
 
+	if (attr.threshold && (attr.freq || attr.watermark))
+		return -EINVAL;
+
 	if (attr.freq) {
 		if (attr.sample_freq > sysctl_perf_event_sample_rate)
 			return -EINVAL;
@@ -4862,7 +4882,7 @@ inherit_event(struct perf_event *parent_event,
 	else
 		child_event->state = PERF_EVENT_STATE_OFF;
 
-	if (parent_event->attr.freq)
+	if (parent_event->attr.freq || parent_event->attr.threshold)
 		child_event->hw.sample_period = parent_event->hw.sample_period;
 
 	child_event->overflow_handler = parent_event->overflow_handler;
-- 
1.6.6
