Message-Id: <20190627202417.33370-7-brianvv@google.com>
Date: Thu, 27 Jun 2019 13:24:17 -0700
From: Brian Vazquez <brianvv@...gle.com>
To: Brian Vazquez <brianvv.kernel@...il.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
"David S . Miller" <davem@...emloft.net>
Cc: Stanislav Fomichev <sdf@...gle.com>,
Willem de Bruijn <willemb@...gle.com>,
Petar Penkov <ppenkov@...gle.com>,
linux-kernel@...r.kernel.org, netdev@...r.kernel.org,
bpf@...r.kernel.org, Brian Vazquez <brianvv@...gle.com>
Subject: [RFC PATCH bpf-next v2 6/6] selftests/bpf: add test to measure
performance of BPF_MAP_DUMP
This test compares the time it takes to read an entire table of 100K
elements from a BPF hashmap using BPF_MAP_DUMP versus
BPF_MAP_GET_NEXT_KEY + BPF_MAP_LOOKUP_ELEM.
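
The entry-by-entry baseline is the usual full-map walk with the
existing libbpf calls, roughly (a minimal sketch, not part of the
patch, assuming u64 keys and values as in the test):

	__u64 key, next_key, value;
	int err;

	/* fetch the first key, then lookup and advance until the end */
	err = bpf_map_get_next_key(fd, NULL, &key);
	while (!err) {
		bpf_map_lookup_elem(fd, &key, &value);
		err = bpf_map_get_next_key(fd, &key, &next_key);
		key = next_key;
	}

That path costs two syscalls per element, while BPF_MAP_DUMP returns a
buffer of entries per call, which is where the improvement printed by
the test is expected to come from.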
Signed-off-by: Brian Vazquez <brianvv@...gle.com>
---
tools/testing/selftests/bpf/test_maps.c | 71 +++++++++++++++++++++++++
1 file changed, 71 insertions(+)
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 3df72b46fd1d9..61050272c20ee 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -18,6 +18,7 @@
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/bpf.h>
+#include <linux/time64.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
@@ -376,6 +377,75 @@ static void test_hashmap_dump(void)
close(fd);
}
+static void test_hashmap_dump_perf(void)
+{
+ int fd, i, max_entries = 100000;
+ uint64_t key, value, next_key;
+ bool next_key_valid = true;
+ void *buf;
+	__u32 buf_len, entries;
+ int j, k = 0;
+ int num_ent, off;
+ int clk_id = CLOCK_MONOTONIC;
+ struct timespec begin, end;
+ long long time_spent, dump_time_spent;
+ double res;
+ int tests[] = {1, 2, 230, 5000, 73000, 100000, 234567};
+ int test_len = ARRAY_SIZE(tests);
+ const int elem_size = sizeof(key) + sizeof(value);
+
+ fd = helper_fill_hashmap(max_entries);
+	/* Allocate the buffer once, sized for the largest test case */
+	buf = malloc(elem_size * tests[test_len - 1]);
+ assert(buf != NULL);
+
+test:
+ entries = tests[k];
+	buf_len = elem_size * tests[k];
+ k++;
+ clock_gettime(clk_id, &begin);
+ errno = 0;
+ i = 0;
+ while (errno == 0) {
+ bpf_map_dump(fd, !i ? NULL : &key,
+ buf, &buf_len);
+ if (errno)
+ break;
+ num_ent = buf_len / elem_size;
+ for (j = 0, off = 0; j < num_ent; j++) {
+ key = *((uint64_t *)(buf + off));
+ off += sizeof(key);
+ value = *((uint64_t *)(buf + off));
+ off += sizeof(value);
+ }
+ i += num_ent;
+ }
+ clock_gettime(clk_id, &end);
+ assert(i == max_entries);
+ dump_time_spent = NSEC_PER_SEC * (end.tv_sec - begin.tv_sec) +
+ end.tv_nsec - begin.tv_nsec;
+ next_key_valid = true;
+ clock_gettime(clk_id, &begin);
+ assert(bpf_map_get_next_key(fd, NULL, &key) == 0);
+ for (i = 0; next_key_valid; i++) {
+ next_key_valid = bpf_map_get_next_key(fd, &key, &next_key) == 0;
+ assert(bpf_map_lookup_elem(fd, &key, &value) == 0);
+ key = next_key;
+ }
+ clock_gettime(clk_id, &end);
+ time_spent = NSEC_PER_SEC * (end.tv_sec - begin.tv_sec) +
+ end.tv_nsec - begin.tv_nsec;
+	res = (1 - ((double)dump_time_spent / time_spent)) * 100;
+	printf("buf_len_%u:\t dump: %lld ns, entry-by-entry: %lld ns, improvement: %.2lf%%\n",
+	       entries, dump_time_spent, time_spent, res);
+ assert(i == max_entries);
+
+ if (k < test_len)
+ goto test;
+ free(buf);
+ close(fd);
+}
+
static void test_hashmap_zero_seed(void)
{
int i, first, second, old_flags;
@@ -1736,6 +1806,7 @@ static void run_all_tests(void)
test_hashmap_walk(0, NULL);
test_hashmap_zero_seed();
test_hashmap_dump();
+ test_hashmap_dump_perf();
test_arraymap(0, NULL);
test_arraymap_percpu(0, NULL);
--
2.22.0.410.gd8fdbe21b5-goog