Message-ID: <20141002141559.GJ9764@krava.brq.redhat.com>
Date:	Thu, 2 Oct 2014 16:15:59 +0200
From:	Jiri Olsa <jolsa@...hat.com>
To:	Alexander Yarygin <yarygin@...ux.vnet.ibm.com>
Cc:	linux-kernel@...r.kernel.org,
	Arnaldo Carvalho de Melo <acme@...nel.org>,
	Christian Borntraeger <borntraeger@...ibm.com>,
	David Ahern <dsahern@...il.com>,
	Frederic Weisbecker <fweisbec@...il.com>,
	Ingo Molnar <mingo@...nel.org>, Mike Galbraith <efault@....de>,
	Namhyung Kim <namhyung.kim@....com>,
	Paul Mackerras <paulus@...ba.org>,
	Peter Zijlstra <a.p.zijlstra@...llo.nl>,
	Stephane Eranian <eranian@...gle.com>
Subject: Re: [PATCH 1/2] perf tools: Add option to copy events when queueing

On Thu, Oct 02, 2014 at 02:32:08PM +0400, Alexander Yarygin wrote:

SNIP

> +		if (!oe->buffer) {
> +			if (oe->copy_on_queue) {
> +				oe->cur_alloc_size -= new_event->header.size;
> +				free(new_event);
> +			}
>  			return NULL;
> +		}
>  
>  		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
>  		   oe->cur_alloc_size, size, oe->max_alloc_size);
> @@ -90,15 +110,19 @@ static struct ordered_event *alloc_event(struct ordered_events *oe)
>  		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
>  	}
>  
> +	new->event = new_event;
> +
>  	return new;
>  }
>  
>  struct ordered_event *
> -ordered_events__new(struct ordered_events *oe, u64 timestamp)
> +ordered_events__new(struct ordered_events *oe, u64 timestamp,
> +		    union perf_event *event)
>  {
>  	struct ordered_event *new;
>  
> -	new = alloc_event(oe);
> +	new = alloc_event(oe, event);
> +
>  	if (new) {
>  		new->timestamp = timestamp;
>  		queue_event(oe, new);
> @@ -111,6 +135,10 @@ void ordered_events__delete(struct ordered_events *oe, struct ordered_event *eve
>  {
>  	list_move(&event->list, &oe->cache);
>  	oe->nr_events--;
> +	if (oe->copy_on_queue) {
> +		oe->cur_alloc_size -= event->event->header.size;
> +		free(event->event);
> +	}
>  }
>  
>  static int __ordered_events__flush(struct perf_session *s,
> @@ -240,6 +268,11 @@ void ordered_events__free(struct ordered_events *oe)
>  
>  		event = list_entry(oe->to_free.next, struct ordered_event, list);
>  		list_del(&event->list);
> +		if (oe->copy_on_queue) {
> +			oe->cur_alloc_size -= event->event->header.size;
> +			free(event->event);
> +		}
> +
>  		free(event);

looks ok.. but I was wondering if we could move those repeating
bits into a function.. something like below (untested, just compiled)

thanks,
jirka


---
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index f7383ccc6690..583dcefc92fb 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -58,23 +58,41 @@ static void queue_event(struct ordered_events *oe, struct ordered_event *new)
 	}
 }
 
+static union perf_event *__dup_event(struct ordered_events *oe, union perf_event *event)
+{
+	union perf_event *new_event = NULL;
+
+	if (oe->cur_alloc_size < oe->max_alloc_size) {
+		new_event = memdup(event, event->header.size);
+		if (new_event)
+			oe->cur_alloc_size += event->header.size;
+	}
+
+	return new_event;
+}
+
+static union perf_event *dup_event(struct ordered_events *oe, union perf_event *event)
+{
+	return oe->copy_on_queue ? __dup_event(oe, event) : event;
+}
+
+static void free_dup_event(struct ordered_events *oe, union perf_event *event)
+{
+	if (oe->copy_on_queue) {
+		oe->cur_alloc_size -= event->header.size;
+		free(event);
+	}
+}
+
 #define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
 static struct ordered_event *alloc_event(struct ordered_events *oe,
 					 union perf_event *event)
 {
 	struct list_head *cache = &oe->cache;
 	struct ordered_event *new = NULL;
-	union perf_event *new_event = NULL;
-
-	if (oe->copy_on_queue) {
-		if (oe->cur_alloc_size < oe->max_alloc_size) {
-			new_event = memdup(event, event->header.size);
-			if (new_event)
-				oe->cur_alloc_size += event->header.size;
-		}
-	} else
-		new_event = event;
+	union perf_event *new_event;
 
+	new_event = dup_event(oe, event);
 	if (!new_event)
 		return NULL;
 
@@ -90,10 +108,7 @@ static struct ordered_event *alloc_event(struct ordered_events *oe,
 
 		oe->buffer = malloc(size);
 		if (!oe->buffer) {
-			if (oe->copy_on_queue) {
-				oe->cur_alloc_size -= new_event->header.size;
-				free(new_event);
-			}
+			free_dup_event(oe, new_event);
 			return NULL;
 		}
 
@@ -135,10 +150,7 @@ void ordered_events__delete(struct ordered_events *oe, struct ordered_event *eve
 {
 	list_move(&event->list, &oe->cache);
 	oe->nr_events--;
-	if (oe->copy_on_queue) {
-		oe->cur_alloc_size -= event->event->header.size;
-		free(event->event);
-	}
+	free_dup_event(oe, event->event);
 }
 
 static int __ordered_events__flush(struct perf_session *s,
@@ -268,11 +280,7 @@ void ordered_events__free(struct ordered_events *oe)
 
 		event = list_entry(oe->to_free.next, struct ordered_event, list);
 		list_del(&event->list);
-		if (oe->copy_on_queue) {
-			oe->cur_alloc_size -= event->event->header.size;
-			free(event->event);
-		}
-
+		free_dup_event(oe, event->event);
 		free(event);
 	}
 }
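
For illustration only, here's a minimal standalone sketch of the same pattern
outside the perf tree (the struct, sizes and main() are made up; only the
dup_event/free_dup_event shape mirrors the diff above, with malloc+memcpy
standing in for perf's memdup()):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct event {
		size_t size;
		char payload[64];
	};

	struct ordered_events {
		int copy_on_queue;
		size_t cur_alloc_size;
		size_t max_alloc_size;
	};

	/* duplicate the event and account for it, if the cap allows */
	static struct event *__dup_event(struct ordered_events *oe,
					 struct event *event)
	{
		struct event *new_event = NULL;

		if (oe->cur_alloc_size < oe->max_alloc_size) {
			new_event = malloc(event->size);
			if (new_event) {
				memcpy(new_event, event, event->size);
				oe->cur_alloc_size += event->size;
			}
		}
		return new_event;
	}

	/* copy only when copy_on_queue is set, otherwise pass through */
	static struct event *dup_event(struct ordered_events *oe,
				       struct event *event)
	{
		return oe->copy_on_queue ? __dup_event(oe, event) : event;
	}

	/* undo the accounting and free the copy, if one was made */
	static void free_dup_event(struct ordered_events *oe,
				   struct event *event)
	{
		if (oe->copy_on_queue) {
			oe->cur_alloc_size -= event->size;
			free(event);
		}
	}

	int main(void)
	{
		struct ordered_events oe = {
			.copy_on_queue  = 1,
			.max_alloc_size = 1024,
		};
		struct event on_stack = { .size = sizeof(struct event) };
		struct event *copy;

		copy = dup_event(&oe, &on_stack);
		if (!copy)
			return 1;
		printf("accounted %zu bytes\n", oe.cur_alloc_size);
		free_dup_event(&oe, copy);
		printf("accounted %zu bytes\n", oe.cur_alloc_size);
		return 0;
	}

The point of the refactor is visible in main(): callers never open-code the
copy_on_queue check or the cur_alloc_size accounting, they just pair
dup_event() with free_dup_event().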
