Message-Id: <20190703170118.196552-7-brianvv@google.com>
Date: Wed, 3 Jul 2019 10:01:18 -0700
From: Brian Vazquez <brianvv@...gle.com>
To: Brian Vazquez <brianvv.kernel@...il.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
"David S . Miller" <davem@...emloft.net>
Cc: Stanislav Fomichev <sdf@...gle.com>,
Willem de Bruijn <willemb@...gle.com>,
Petar Penkov <ppenkov@...gle.com>,
linux-kernel@...r.kernel.org, netdev@...r.kernel.org,
bpf@...r.kernel.org, Brian Vazquez <brianvv@...gle.com>
Subject: [PATCH bpf-next RFC v3 6/6] selftests/bpf: add test to measure
performance of BPF_MAP_DUMP
This test compares the time it takes to read an entire table of 100K
elements from a BPF hashmap using BPF_MAP_DUMP versus using
BPF_MAP_GET_NEXT_KEY + BPF_MAP_LOOKUP_ELEM, for several dump buffer sizes.
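Both paths are timed the same way with CLOCK_MONOTONIC; a minimal sketch
of the measurement (simplified from the test below, with the map walk
elided):

  struct timespec begin, end;
  long long elapsed_ns;

  clock_gettime(CLOCK_MONOTONIC, &begin);
  /* ... read all map entries via one of the two APIs ... */
  clock_gettime(CLOCK_MONOTONIC, &end);
  elapsed_ns = NSEC_PER_SEC * (end.tv_sec - begin.tv_sec) +
               (end.tv_nsec - begin.tv_nsec);

The reported improvement is (1 - dump_time / entry_by_entry_time) * 100,
i.e. the percentage of wall-clock time saved by BPF_MAP_DUMP.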
Signed-off-by: Brian Vazquez <brianvv@...gle.com>
---
tools/testing/selftests/bpf/test_maps.c | 67 +++++++++++++++++++++++++
1 file changed, 67 insertions(+)
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index b19ba6aa8e36..786d0e340aed 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -18,6 +18,7 @@
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/bpf.h>
+#include <linux/time64.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
@@ -388,6 +389,71 @@ static void test_hashmap_dump(void)
close(fd);
}
+static void test_hashmap_dump_perf(void)
+{
+	int fd, i, max_entries = 100000;
+	uint64_t key, value, next_key;
+	bool next_key_valid = true;
+	void *buf;
+	__u32 buf_len, entries;
+	int j = 0;
+	int clk_id = CLOCK_MONOTONIC;
+	struct timespec begin, end;
+	long long time_spent, dump_time_spent;
+	double res;
+	int tests[] = {1, 2, 230, 5000, 73000, 100000, 234567};
+	int test_len = ARRAY_SIZE(tests);
+	const int elem_size = sizeof(key) + sizeof(value);
+
+	fd = helper_fill_hashmap(max_entries);
+	/* Allocate memory once, sized for the largest buffer under test */
+	buf = malloc(elem_size * tests[test_len - 1]);
+	assert(buf != NULL);
+
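+	/* Walk the whole map once per buffer size in tests[] */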
+test:
+	entries = tests[j];
+	buf_len = elem_size * tests[j];
+	j++;
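+	/* First pass: read every entry via BPF_MAP_DUMP */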
+	clock_gettime(clk_id, &begin);
+	errno = 0;
+	i = 0;
+	while (errno == 0) {
+		bpf_map_dump(fd, !i ? NULL : &key, buf, &buf_len);
+		if (errno)
+			break;
+		if (!i)
+			key = *((uint64_t *)(buf + buf_len - elem_size));
+		i += buf_len / elem_size;
+	}
+	clock_gettime(clk_id, &end);
+	assert(i == max_entries);
+	dump_time_spent = NSEC_PER_SEC * (end.tv_sec - begin.tv_sec) +
+			  end.tv_nsec - begin.tv_nsec;
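+	/* Second pass: read every entry via GET_NEXT_KEY + LOOKUP_ELEM */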
+	next_key_valid = true;
+	clock_gettime(clk_id, &begin);
+	assert(bpf_map_get_next_key(fd, NULL, &key) == 0);
+	for (i = 0; next_key_valid; i++) {
+		next_key_valid = bpf_map_get_next_key(fd, &key, &next_key) == 0;
+		assert(bpf_map_lookup_elem(fd, &key, &value) == 0);
+		key = next_key;
+	}
+	clock_gettime(clk_id, &end);
+	time_spent = NSEC_PER_SEC * (end.tv_sec - begin.tv_sec) +
+		     end.tv_nsec - begin.tv_nsec;
+	res = (1 - ((double)dump_time_spent / time_spent)) * 100;
+	printf("buf_len_%u:\t %lld entry-by-entry: %lld improvement %lf\n",
+	       entries, dump_time_spent, time_spent, res);
+	assert(i == max_entries);
+
+	if (j < test_len)
+		goto test;
+	free(buf);
+	close(fd);
+}
+
static void test_hashmap_zero_seed(void)
{
int i, first, second, old_flags;
@@ -1748,6 +1812,7 @@ static void run_all_tests(void)
test_hashmap_walk(0, NULL);
test_hashmap_zero_seed();
test_hashmap_dump();
+ test_hashmap_dump_perf();
test_arraymap(0, NULL);
test_arraymap_percpu(0, NULL);
--
2.22.0.410.gd8fdbe21b5-goog