Message-ID: <20210123225709.GB138414@krava>
Date:   Sat, 23 Jan 2021 23:57:09 +0100
From:   Jiri Olsa <jolsa@...hat.com>
To:     "Jin, Yao" <yao.jin@...ux.intel.com>
Cc:     acme@...nel.org, jolsa@...nel.org, peterz@...radead.org,
        mingo@...hat.com, alexander.shishkin@...ux.intel.com,
        Linux-kernel@...r.kernel.org, ak@...ux.intel.com,
        kan.liang@...el.com, yao.jin@...el.com, ying.huang@...el.com
Subject: Re: [PATCH v7] perf stat: Fix wrong skipping for per-die aggregation

On Thu, Jan 21, 2021 at 12:21:36PM +0800, Jin, Yao wrote:

SNIP

> mask = hashmap__new(pkg_id_hash, pkg_id_equal, NULL);
> d = cpu_map__get_die(cpus, cpu, NULL).die;
> key = (size_t)d << KEY_SHIFT | s;	/* s is socket id */
> if (hashmap__find(mask, (void *)key, NULL))
> 	*skip = true;
> else
> 	ret = hashmap__add(mask, (void *)key, (void *)1);
> 
> If we use 'unsigned long' to replace 'size_t', it reports a build warning on 32-bit:
> 
> stat.c:320:23: warning: passing argument 1 of ‘hashmap__new’ from
> incompatible pointer type [-Wincompatible-pointer-types]
>    mask = hashmap__new(pkg_id_hash, pkg_id_equal, NULL);
>                        ^~~~~~~~~~~
> In file included from stat.c:16:
> hashmap.h:75:17: note: expected ‘hashmap_hash_fn’ {aka ‘unsigned int
> (*)(const void *, void *)’} but argument is of type ‘long unsigned int
> (*)(const void *, void *)’
> 
> If we use 'unsigned int', it's not good for 64-bit (there 'size_t' is 'unsigned long', so the same mismatch shows up). So I still use 'size_t' in this patch.
> 
> Any comments for this idea (using conditional compilation)?
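
for reference, the 32-bit warning is just the hashmap_hash_fn typedef:
util/hashmap.h declares the hash callback with a size_t return, and size_t
is 'unsigned int' on 32-bit but 'unsigned long' on 64-bit, so any fixed-width
return type will mismatch on one of the two builds. a minimal illustration
(declarations only, pkg_id_hash_ul is a made-up name):

	/* the callback typedef from tools/perf/util/hashmap.h */
	typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);

	/* matches the typedef on both 32-bit and 64-bit builds */
	static size_t pkg_id_hash(const void *key, void *ctx);

	/* 'unsigned long' return mismatches on 32-bit (size_t == unsigned int);
	 * an 'unsigned int' return would mismatch on 64-bit instead */
	static unsigned long pkg_id_hash_ul(const void *key, void *ctx);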

isn't it simpler to allocate the key then? like below
(just compile tested)

jirka


---
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 5aba8fa92386..195fda142c98 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -276,19 +276,31 @@ void evlist__save_aggr_prev_raw_counts(struct evlist *evlist)
 
 static void zero_per_pkg(struct evsel *counter)
 {
-	if (counter->per_pkg_mask)
+	struct hashmap_entry *cur;
+	size_t bkt;
+
+	if (counter->per_pkg_mask) {
+		hashmap__for_each_entry(counter->per_pkg_mask, cur, bkt)
+			free((char *)cur->key);
+
 		hashmap__clear(counter->per_pkg_mask);
+	}
 }
 
-static size_t pkg_id_hash(const void *key, void *ctx __maybe_unused)
+static size_t pkg_id_hash(const void *__key, void *ctx __maybe_unused)
 {
-	return (size_t)key & 0xffff;
+	uint64_t *key = (uint64_t *) __key;
+
+	return *key & 0xffffffff;
 }
 
-static bool pkg_id_equal(const void *key1, const void *key2,
+static bool pkg_id_equal(const void *__key1, const void *__key2,
 			 void *ctx __maybe_unused)
 {
-	return (size_t)key1 == (size_t)key2;
+	uint64_t *key1 = (uint64_t *) __key1;
+	uint64_t *key2 = (uint64_t *) __key2;
+
+	return *key1 == *key2;
 }
 
 static int check_per_pkg(struct evsel *counter,
@@ -297,7 +309,7 @@ static int check_per_pkg(struct evsel *counter,
 	struct hashmap *mask = counter->per_pkg_mask;
 	struct perf_cpu_map *cpus = evsel__cpus(counter);
 	int s, d, ret = 0;
-	size_t key;
+	uint64_t *key;
 
 	*skip = false;
 
@@ -338,7 +350,11 @@ static int check_per_pkg(struct evsel *counter,
 	if (d < 0)
 		return -1;
 
-	key = (size_t)d << 16 | s;
+	key = malloc(sizeof(*key));
+	if (!key)
+		return -ENOMEM;
+
+	*key = (uint64_t)d << 32 | s;
 	if (hashmap__find(mask, (void *)key, NULL))
 		*skip = true;
 	else

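a self-contained sketch of the same allocated-key pattern, in case it helps
reading the diff; it assumes the pointer-keyed hashmap from
tools/perf/util/hashmap.h, and the helper name plus the free() on the
"already seen" path are mine, not part of the patch above:

	/* sketch only: pointer-keyed hashmap as in tools/perf/util/hashmap.h */
	#include <errno.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdlib.h>
	#include "hashmap.h"

	/* hash/equal callbacks dereference the heap-allocated 64-bit keys */
	static size_t u64_key_hash(const void *key, void *ctx)
	{
		return *(const uint64_t *)key & 0xffffffff;
	}

	static bool u64_key_equal(const void *key1, const void *key2, void *ctx)
	{
		return *(const uint64_t *)key1 == *(const uint64_t *)key2;
	}

	/*
	 * Mark a (die, socket) pair as seen in a map created with
	 * hashmap__new(u64_key_hash, u64_key_equal, NULL); sets *skip
	 * when the pair was already recorded.
	 */
	static int mark_die_socket(struct hashmap *mask, int die, int socket_id,
				   bool *skip)
	{
		uint64_t *key = malloc(sizeof(*key));

		if (!key)
			return -ENOMEM;

		*key = (uint64_t)die << 32 | socket_id;

		if (hashmap__find(mask, key, NULL)) {
			*skip = true;
			free(key);	/* duplicate, the map keeps its own copy */
			return 0;
		}

		/* map owns the key now; free it when clearing, as in zero_per_pkg() */
		return hashmap__add(mask, key, (void *)1);
	}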