lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20190829064517.2751629-1-yhs@fb.com>
Date:   Wed, 28 Aug 2019 23:45:17 -0700
From:   Yonghong Song <yhs@...com>
To:     <bpf@...r.kernel.org>, <netdev@...r.kernel.org>
CC:     Alexei Starovoitov <ast@...com>,
        Brian Vazquez <brianvv@...gle.com>,
        Daniel Borkmann <daniel@...earbox.net>, <kernel-team@...com>,
        Yonghong Song <yhs@...com>
Subject: [PATCH bpf-next 13/13] tools/bpf: measure map batching perf

The test program run result:
  $ ./test_maps
  ...
  measure_lookup: max_entries 1000000, batch 10, time 342
  measure_lookup: max_entries 1000000, batch 1000, time 295
  measure_lookup: max_entries 1000000, batch 1000000, time 270
  measure_lookup: max_entries 1000000, no batching, time 1346
  measure_lookup_delete: max_entries 1000000, batch 10, time 433
  measure_lookup_delete: max_entries 1000000, batch 1000, time 363
  measure_lookup_delete: max_entries 1000000, batch 1000000, time 357
  measure_lookup_delete: max_entries 1000000, not batch, time 1894
  measure_delete: max_entries 1000000, batch, time 220
  measure_delete: max_entries 1000000, not batch, time 1289
  test_map_batch_perf:PASS
  ...

  The test is running on a qemu VM environment. The time
  unit is millisecond. A simple hash table with 1M elements
  is created.

  For lookup and lookup_and_deletion, since buffer allocation
  is needed to hold the lookup results, three different
  batch sizes (10, 1000, and 1M) are tried. The performance
  without batching is also measured. A batch size of 10
  can already improve performance dramatically, more than 70%,
  and increasing batch size may continue to improve performance,
  but with diminishing returns.

  For delete, the batch API provides a mechanism to delete all elements
  in the map, which is used here. The deletion of the whole map
  improves performance by 80% compared to the non-batch mechanism.

Signed-off-by: Yonghong Song <yhs@...com>
---
 .../selftests/bpf/map_tests/map_batch_perf.c  | 242 ++++++++++++++++++
 1 file changed, 242 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/map_tests/map_batch_perf.c

diff --git a/tools/testing/selftests/bpf/map_tests/map_batch_perf.c b/tools/testing/selftests/bpf/map_tests/map_batch_perf.c
new file mode 100644
index 000000000000..42d95651e1ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/map_tests/map_batch_perf.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook  */
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include <test_maps.h>
+
+/* Test map batch performance.
+ * Test three common scenarios:
+ *    - batched lookup
+ *    - batched lookup and delete
+ *    - batched deletion
+ */
+/* Populate the map with max_entries (key, value) pairs using a single
+ * bpf_map_update_batch() call.  Keys are 1..max_entries and each value
+ * is key + 1.  keys/values are caller-provided scratch arrays sized
+ * for max_entries elements.
+ */
+static void map_batch_update(int map_fd, __u32 max_entries, int *keys,
+			     int *values)
+{
+	int i, err;
+
+	for (i = 0; i < max_entries; i++) {
+		keys[i] = i + 1;
+		values[i] = i + 2;
+	}
+
+	/* max_entries doubles as the in/out element-count argument */
+	err = bpf_map_update_batch(map_fd, keys, values, &max_entries, 0, 0);
+	CHECK(err, "bpf_map_update_batch()", "error:%s\n", strerror(errno));
+}
+
+/* Wall-clock time in milliseconds; callers only use differences.
+ * NOTE(review): tv.tv_sec * 1000 is computed in time_t width, which
+ * can overflow a signed 32-bit time_t on 32-bit systems -- consider
+ * casting to unsigned long before the multiply.
+ */
+static unsigned long util_gettime(void)
+{
+	struct timeval tv;
+
+	gettimeofday(&tv, NULL);
+	return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
+}
+
+/* Time batched lookup of all elements with batch sizes 10, 1000 and
+ * max_entries, then time a plain get_next_key/lookup_elem walk over
+ * the same map for comparison.  Lookup does not modify the map, so a
+ * single fill serves all passes.  Results are printed in milliseconds.
+ */
+static void measure_lookup(int map_fd, __u32 max_entries, int *keys,
+			   int *values)
+{
+	__u32 batches[3] = {10, 1000};	/* [2] is filled in below */
+	int err, key, value, option;
+	unsigned long start, end;
+	void *p_key, *p_next_key;
+	__u32 count, total;
+
+	map_batch_update(map_fd, max_entries, keys, values);
+
+	/* batched */
+	batches[2] = max_entries;	/* third config: whole map in one call */
+	for (option = 0; option < 3; option++) {
+		p_key = NULL;		/* NULL in-batch: start from the beginning */
+		p_next_key = &key;
+		count = batches[option];
+		start = util_gettime();
+		total = 0;
+
+		while (true) {
+			/* count is in/out: batch size requested on entry,
+			 * number of elements actually returned on exit
+			 */
+			err = bpf_map_lookup_batch(map_fd, p_key, &p_next_key,
+						   keys, values, &count, 0, 0);
+			CHECK(err, "bpf_map_lookup_batch()", "error: %s\n",
+			      strerror(errno));
+
+			total += count;
+			/* the batch API NULLs the out-batch token once the
+			 * map has been fully traversed
+			 */
+			if (!p_next_key)
+				break;
+
+			/* after the first call, resume from the returned token */
+			if (!p_key)
+				p_key = p_next_key;
+		}
+
+		end = util_gettime();
+		CHECK(total != max_entries,
+		      "checking total", "total %u, max_entries %u\n",
+		      total, max_entries);
+		printf("%s: max_entries %u, batch %u, time %ld\n", __func__,
+		       max_entries, batches[option], end - start);
+	}
+
+	/* not batched: walk the map one element at a time */
+	start = util_gettime();
+	p_key = NULL;
+	p_next_key = &key;
+	while (!bpf_map_get_next_key(map_fd, p_key, p_next_key)) {
+		err = bpf_map_lookup_elem(map_fd, p_next_key, &value);
+		CHECK(err, "bpf_map_lookup_elem()", "error: %s\n",
+		      strerror(errno));
+		p_key = p_next_key;
+	}
+	end = util_gettime();
+	printf("%s: max_entries %u, no batching, time %ld\n", __func__,
+	       max_entries, end - start);
+}
+
+/* Time bpf_map_lookup_and_delete_batch() with batch sizes 10, 1000
+ * and max_entries, then time the equivalent non-batched
+ * get_next_key/lookup_elem/delete_elem walk.  Each pass empties the
+ * map, so it is refilled before every measurement.
+ */
+static void measure_lookup_delete(int map_fd, __u32 max_entries, int *keys,
+				  int *values)
+{
+	int err, key, next_key, value, option;
+	__u32 batches[3] = {10, 1000};	/* [2] is filled in below */
+	unsigned long start, end;
+	void *p_key, *p_next_key;
+	__u32 count, total;
+
+	/* batched */
+	batches[2] = max_entries;
+	for (option = 0; option < 3; option++) {
+		/* the previous pass deleted everything; refill */
+		map_batch_update(map_fd, max_entries, keys, values);
+		p_key = NULL;		/* NULL in-batch: start from the beginning */
+		p_next_key = &key;
+		count = batches[option];
+		start = util_gettime();
+		total = 0;
+
+		while (true) {
+			err = bpf_map_lookup_and_delete_batch(map_fd, p_key,
+				&p_next_key, keys, values, &count, 0, 0);
+			CHECK(err, "bpf_map_lookup_and_delete_batch()",
+			      "error: %s\n", strerror(errno));
+
+			total += count;
+			/* out-batch token is NULLed once the map is empty */
+			if (!p_next_key)
+				break;
+
+			if (!p_key)
+				p_key = p_next_key;
+		}
+
+		end = util_gettime();
+		CHECK(total != max_entries,
+		      "checking total", "total %u, max_entries %u\n",
+		      total, max_entries);
+		printf("%s: max_entries %u, batch %u, time %ld\n", __func__,
+		       max_entries, batches[option], end - start);
+	}
+
+	/* not batched: lookup + delete element by element.  The next key
+	 * must be fetched before its predecessor is deleted, so the two
+	 * key buffers (key/next_key) are ping-ponged below.
+	 */
+	map_batch_update(map_fd, max_entries, keys, values);
+	start = util_gettime();
+	p_key = NULL;
+	p_next_key = &key;
+	/* prime the walk: fetch the first key and look up its value */
+	err = bpf_map_get_next_key(map_fd, p_key, p_next_key);
+	CHECK(err, "bpf_map_get_next_key()", "error: %s\n", strerror(errno));
+	err = bpf_map_lookup_elem(map_fd, p_next_key, &value);
+	CHECK(err, "bpf_map_lookup_elem()", "error: %s\n", strerror(errno));
+
+	p_key = p_next_key;
+	p_next_key = &next_key;
+	while (!bpf_map_get_next_key(map_fd, p_key, p_next_key)) {
+		err = bpf_map_delete_elem(map_fd, p_key);
+		CHECK(err, "bpf_map_delete_elem()", "error: %s\n",
+		      strerror(errno));
+
+		/* mirror the lookup half of lookup_and_delete */
+		err = bpf_map_lookup_elem(map_fd, p_next_key, &value);
+		CHECK(err, "bpf_map_lookup_elem()", "error: %s\n",
+		      strerror(errno));
+
+		p_key = p_next_key;
+		p_next_key = (p_next_key == &key) ? &next_key : &key;
+	}
+	/* last element: get_next_key failed above, delete it explicitly */
+	err = bpf_map_delete_elem(map_fd, p_key);
+	CHECK(err, "bpf_map_delete_elem()", "error: %s\n",
+	      strerror(errno));
+	end = util_gettime();
+	printf("%s: max_entries %u, not batch, time %ld\n", __func__,
+	       max_entries, end - start);
+}
+
+/* Time deletion of the whole map: a single bpf_map_delete_batch()
+ * call with NULL keys (delete-all) vs. a get_next_key/delete_elem
+ * walk over every element.
+ */
+static void measure_delete(int map_fd, __u32 max_entries, int *keys,
+			   int *values)
+{
+	unsigned long start, end;
+	void *p_key, *p_next_key;
+	int err, key, next_key;
+	__u32 count;
+
+	/* batched */
+	map_batch_update(map_fd, max_entries, keys, values);
+	count = 0;		/* out: number of elements deleted */
+	p_next_key = &key;	/* NOTE(review): dead store -- unused until the
+				 * non-batched pass below reassigns it
+				 */
+	start = util_gettime();
+	/* NULL keys requests deletion of every element in the map */
+	err = bpf_map_delete_batch(map_fd, NULL, NULL, NULL, &count, 0, 0);
+	end = util_gettime();
+	CHECK(err, "bpf_map_delete_batch()", "error: %s\n", strerror(errno));
+	CHECK(count != max_entries, "bpf_map_delete_batch()",
+	      "count = %u, max_entries = %u\n", count, max_entries);
+
+	printf("%s: max_entries %u, batch, time %ld\n", __func__,
+	       max_entries, end - start);
+
+	/* not batched: delete element by element, ping-ponging the two
+	 * key buffers so the next key is fetched before the current one
+	 * is deleted
+	 */
+	map_batch_update(map_fd, max_entries, keys, values);
+	p_key = NULL;
+	p_next_key = &key;
+	start = util_gettime();
+	err = bpf_map_get_next_key(map_fd, p_key, p_next_key);
+	CHECK(err, "bpf_map_get_next_key()", "error: %s\n", strerror(errno));
+
+	p_key = p_next_key;
+	p_next_key = &next_key;
+	while (!bpf_map_get_next_key(map_fd, p_key, p_next_key)) {
+		err = bpf_map_delete_elem(map_fd, p_key);
+		CHECK(err, "bpf_map_delete_elem()", "error: %s\n",
+		      strerror(errno));
+		p_key = p_next_key;
+		p_next_key = (p_next_key == &key) ? &next_key : &key;
+	}
+	/* last element: get_next_key failed above, delete it explicitly */
+	err = bpf_map_delete_elem(map_fd, p_key);
+	CHECK(err, "bpf_map_delete_elem()", "error: %s\n",
+	      strerror(errno));
+	end = util_gettime();
+	printf("%s: max_entries %u, not batch, time %ld\n", __func__,
+	       max_entries, end - start);
+}
+
+/* Entry point (discovered by the test_maps runner): create a 1M-entry
+ * hash map plus key/value scratch buffers, run the three measurement
+ * scenarios, and release all resources so nothing leaks into the
+ * tests that run after this one.
+ */
+void test_map_batch_perf(void)
+{
+	struct bpf_create_map_attr xattr = {
+		.name = "hash_map",
+		.map_type = BPF_MAP_TYPE_HASH,
+		.key_size = sizeof(int),
+		.value_size = sizeof(int),
+	};
+	const __u32 max_entries = 1000000;  /* 1M entries for the hash table */
+	int map_fd, *keys, *values;
+
+	xattr.max_entries = max_entries;
+	map_fd = bpf_create_map_xattr(&xattr);
+	CHECK(map_fd == -1,
+	      "bpf_create_map_xattr()", "error:%s\n", strerror(errno));
+
+	/* scratch arrays shared by all measurement functions */
+	keys = malloc(max_entries * sizeof(int));
+	values = malloc(max_entries * sizeof(int));
+	CHECK(!keys || !values, "malloc()", "error:%s\n", strerror(errno));
+
+	measure_lookup(map_fd, max_entries, keys, values);
+	measure_lookup_delete(map_fd, max_entries, keys, values);
+	measure_delete(map_fd, max_entries, keys, values);
+
+	/* don't leak the buffers or the map fd into subsequent tests */
+	free(values);
+	free(keys);
+	close(map_fd);
+
+	printf("%s:PASS\n", __func__);
+}
-- 
2.17.1

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ