Message-Id: <20240711-convert_test_xdp_veth-v1-2-868accb0a727@bootlin.com>
Date: Thu, 11 Jul 2024 10:09:35 +0200
From: Alexis Lothoré (eBPF Foundation) <alexis.lothore@...tlin.com>
To: Alexei Starovoitov <ast@...nel.org>, 
 Daniel Borkmann <daniel@...earbox.net>, 
 "David S. Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>, 
 Jesper Dangaard Brouer <hawk@...nel.org>, 
 John Fastabend <john.fastabend@...il.com>, 
 Andrii Nakryiko <andrii@...nel.org>, 
 Martin KaFai Lau <martin.lau@...ux.dev>, 
 Eduard Zingerman <eddyz87@...il.com>, Song Liu <song@...nel.org>, 
 Yonghong Song <yonghong.song@...ux.dev>, KP Singh <kpsingh@...nel.org>, 
 Stanislav Fomichev <sdf@...ichev.me>, Hao Luo <haoluo@...gle.com>, 
 Jiri Olsa <jolsa@...nel.org>, Mykola Lysenko <mykolal@...com>, 
 Shuah Khan <shuah@...nel.org>
Cc: ebpf@...uxfoundation.org, netdev@...r.kernel.org, bpf@...r.kernel.org, 
 linux-kselftest@...r.kernel.org, linux-kernel@...r.kernel.org, 
 Thomas Petazzoni <thomas.petazzoni@...tlin.com>, 
 Alexis Lothoré <alexis.lothore@...tlin.com>
Subject: [PATCH 2/3] selftests/bpf: integrate test_xdp_veth into test_progs

test_xdp_veth.sh tests that XDP return codes work as expected: it brings
up multiple veth pairs isolated in different namespaces, attaches specific
XDP programs to each interface, and checks that the whole chain allows
pinging the last interface from the first one. The test runs fine but is
currently not integrated into test_progs, which prevents it from being run
automatically in the CI infrastructure.

Rewrite it as a C test relying on libbpf to allow running it in the CI
infrastructure. The new code brings up the same network infrastructure and
reuses the same eBPF programs as test_xdp_veth.sh, for which skeletons are
already generated by the bpf tests makefile.

Signed-off-by: Alexis Lothoré <alexis.lothore@...tlin.com>
---
The new code has been tested in an aarch64 qemu instance:
Summary: 1/0 PASSED, 0 SKIPPED, 0 FAILED

I have also checked that some minor alterations in the network
configuration (altering the redirect map, or not loading one of the xdp
programs) make the test fail.

On my testing setup, the test takes a bit more than 3 seconds to run on
average.
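
For reference, once merged, the test should be runnable on its own through
test_progs' usual name-based test selection (exact test name assumed to be
derived from the function name), e.g.:

  ./test_progs -t xdp_veth_redirect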
---
 .../selftests/bpf/prog_tests/test_xdp_veth.c       | 234 +++++++++++++++++++++
 1 file changed, 234 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c b/tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c
new file mode 100644
index 000000000000..40d85aece984
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_xdp_veth.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Create 3 namespaces with 3 veth peers, and
+ * forward packets in-between using native XDP
+ *
+ *                      XDP_TX
+ * NS1(veth11)        NS2(veth22)        NS3(veth33)
+ *      |                  |                  |
+ *      |                  |                  |
+ *   (veth1,            (veth2,            (veth3,
+ *   id:111)            id:122)            id:133)
+ *     ^ |                ^ |                ^ |
+ *     | |  XDP_REDIRECT  | |  XDP_REDIRECT  | |
+ *     | ------------------ ------------------ |
+ *     -----------------------------------------
+ *                    XDP_REDIRECT
+ */
+
+#define _GNU_SOURCE
+#include <net/if.h>
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "xdp_dummy.skel.h"
+#include "xdp_redirect_map.skel.h"
+#include "xdp_tx.skel.h"
+
+#define VETH_PAIRS_COUNT	3
+#define NS_NAME_MAX_LEN		16
+#define NS_SUFFIX_LEN		6
+#define VETH_NAME_MAX_LEN	16
+#define IP_SRC				"10.1.1.11"
+#define IP_DST				"10.1.1.33"
+#define IP_CMD_MAX_LEN		128
+
+struct skeletons {
+	struct xdp_dummy *xdp_dummy;
+	struct xdp_tx *xdp_tx;
+	struct xdp_redirect_map *xdp_redirect_maps;
+};
+
+struct veth_configuration {
+	char local_veth[VETH_NAME_MAX_LEN]; /* Interface in main namespace */
+	char remote_veth[VETH_NAME_MAX_LEN]; /* Peer interface in dedicated namespace */
+	char namespace[NS_NAME_MAX_LEN]; /* Namespace for the remote veth */
+	char next_veth[VETH_NAME_MAX_LEN]; /* Local interface to redirect traffic to */
+	char *remote_addr; /* IP address of the remote veth */
+};
+
+static struct veth_configuration config[VETH_PAIRS_COUNT] = {
+	{
+		.local_veth = "veth1",
+		.remote_veth = "veth11",
+		.next_veth = "veth2",
+		.remote_addr = IP_SRC
+	},
+	{
+		.local_veth = "veth2",
+		.remote_veth = "veth22",
+		.next_veth = "veth3",
+		.remote_addr = NULL
+	},
+	{
+		.local_veth = "veth3",
+		.remote_veth = "veth33",
+		.next_veth = "veth1",
+		.remote_addr = IP_DST
+	}
+};
+
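+/* Build a namespace name from the pair index plus a random 6-letter suffix
+ * (e.g. "ns1-kqJbZm"), to reduce the risk of clashing with a netns already
+ * present on the host.
+ */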
+static void generate_random_ns_name(int index, char *out)
+{
+	int random, count, i;
+
+	count = snprintf(out, NS_NAME_MAX_LEN, "ns%d-", index);
+	for (i = 0; i < NS_SUFFIX_LEN; i++) {
+		random = rand() % 2;
+		out[count++] = random ? 'a' + rand() % 26 : 'A' + rand() % 26;
+	}
+	out[count] = 0;
+}
+
+static int attach_programs_to_veth_pair(struct skeletons *skeletons, int index)
+{
+	struct bpf_program *local_prog, *remote_prog;
+	struct bpf_link **local_link, **remote_link;
+	struct nstoken *nstoken;
+	struct bpf_link *link;
+	int interface;
+
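+	/* Select the programs for this veth pair, following the diagram at the
+	 * top of this file: the local end always gets one of the
+	 * xdp_redirect_map_N programs, while the remote end gets xdp_dummy
+	 * (pairs 0 and 2) or xdp_tx (pair 1).
+	 */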
+	switch (index) {
+	case 0:
+		local_prog = skeletons->xdp_redirect_maps->progs.xdp_redirect_map_0;
+		local_link = &skeletons->xdp_redirect_maps->links.xdp_redirect_map_0;
+		remote_prog = skeletons->xdp_dummy->progs.xdp_dummy_prog;
+		remote_link = &skeletons->xdp_dummy->links.xdp_dummy_prog;
+		break;
+	case 1:
+		local_prog = skeletons->xdp_redirect_maps->progs.xdp_redirect_map_1;
+		local_link = &skeletons->xdp_redirect_maps->links.xdp_redirect_map_1;
+		remote_prog = skeletons->xdp_tx->progs.xdp_tx;
+		remote_link = &skeletons->xdp_tx->links.xdp_tx;
+		break;
+	case 2:
+		local_prog = skeletons->xdp_redirect_maps->progs.xdp_redirect_map_2;
+		local_link = &skeletons->xdp_redirect_maps->links.xdp_redirect_map_2;
+		remote_prog = skeletons->xdp_dummy->progs.xdp_dummy_prog;
+		remote_link = &skeletons->xdp_dummy->links.xdp_dummy_prog;
+		break;
+	default:
+		/* Avoid using local_prog/remote_prog uninitialized */
+		return -1;
+	}
+	interface = if_nametoindex(config[index].local_veth);
+	if (!ASSERT_NEQ(interface, 0, "non zero interface index"))
+		return -1;
+	link = bpf_program__attach_xdp(local_prog, interface);
+	if (!ASSERT_OK_PTR(link, "attach xdp program to local veth"))
+		return -1;
+	*local_link = link;
+	nstoken = open_netns(config[index].namespace);
+	if (!ASSERT_OK_PTR(nstoken, "switch to remote veth namespace"))
+		return -1;
+	interface = if_nametoindex(config[index].remote_veth);
+	if (!ASSERT_NEQ(interface, 0, "non zero interface index"))
+		return -1;
+	link = bpf_program__attach_xdp(remote_prog, interface);
+	*remote_link = link;
+	close_netns(nstoken);
+	if (!ASSERT_OK_PTR(link, "attach xdp program to remote veth"))
+		return -1;
+
+	return 0;
+}
+
+static int configure_network(struct skeletons *skeletons)
+{
+	int interface_id;
+	int map_fd;
+	int err;
+	int i;
+
+	/* First create and configure all interfaces */
+	for (i = 0; i < VETH_PAIRS_COUNT; i++) {
+		generate_random_ns_name(i + 1, config[i].namespace);
+
+		SYS(fail, "ip netns add %s", config[i].namespace);
+		SYS(fail, "ip link add %s type veth peer name %s netns %s",
+				config[i].local_veth,
+				config[i].remote_veth,
+				config[i].namespace);
+		SYS(fail, "ip link set dev %s up", config[i].local_veth);
+		if (config[i].remote_addr)
+			SYS(fail, "ip -n %s addr add %s/24 dev %s",
+					   config[i].namespace, config[i].remote_addr, config[i].remote_veth);
+		SYS(fail, "ip -n %s link set dev %s up",
+				   config[i].namespace, config[i].remote_veth);
+	}
+
+	/* Then configure the redirect map and attach programs to interfaces */
+	map_fd = bpf_map__fd(skeletons->xdp_redirect_maps->maps.tx_port);
+	if (!ASSERT_GE(map_fd, 0, "open redirect map"))
+		goto fail;
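+	/* Each tx_port entry maps key i to the ifindex of the next veth in the
+	 * chain, so that the xdp_redirect_map programs can forward frames from
+	 * one veth pair to the next one (see diagram at the top of this file).
+	 */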
+	for (i = 0; i < VETH_PAIRS_COUNT; i++) {
+		interface_id = if_nametoindex(config[i].next_veth);
+		if (!ASSERT_NEQ(interface_id, 0, "non zero interface index"))
+			goto fail;
+		err = bpf_map_update_elem(map_fd, &i, &interface_id, BPF_ANY);
+		if (!ASSERT_OK(err, "configure interface redirection through map"))
+			goto fail;
+		if (attach_programs_to_veth_pair(skeletons, i))
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	return -1;
+}
+
+static void cleanup_network(void)
+{
+	char cmd[IP_CMD_MAX_LEN];
+	int i;
+
+	/* Deleting namespaces is enough to automatically remove veth pairs as well
+	 */
+	for (i = 0; i < VETH_PAIRS_COUNT; i++) {
+		if (config[i].namespace[0] == 0)
+			continue;
+		snprintf(cmd, IP_CMD_MAX_LEN, "ip netns del %s", config[i].namespace);
+		system(cmd);
+	}
+}
+
+static int check_ping(struct skeletons *skeletons)
+{
+	char cmd[IP_CMD_MAX_LEN];
+
+	/* Test: if all interfaces are properly configured, we must be able to ping
+	 * veth33 from veth11
+	 */
+	snprintf(cmd, IP_CMD_MAX_LEN,
+			 "ip netns exec %s ping -c 1 -W 1 %s > /dev/null",
+			 config[0].namespace, IP_DST);
+	return system(cmd);
+}
+
+void test_xdp_veth_redirect(void)
+{
+	struct skeletons skeletons = {};
+
+	skeletons.xdp_dummy = xdp_dummy__open_and_load();
+	if (!ASSERT_OK_PTR(skeletons.xdp_dummy, "xdp_dummy__open_and_load"))
+		return;
+
+	skeletons.xdp_tx = xdp_tx__open_and_load();
+	if (!ASSERT_OK_PTR(skeletons.xdp_tx, "xdp_tx__open_and_load"))
+		goto destroy_xdp_dummy;
+
+	skeletons.xdp_redirect_maps = xdp_redirect_map__open_and_load();
+	if (!ASSERT_OK_PTR(skeletons.xdp_redirect_maps, "xdp_redirect_map__open_and_load"))
+		goto destroy_xdp_tx;
+
+	if (configure_network(&skeletons))
+		goto destroy_xdp_redirect_map;
+
+	ASSERT_OK(check_ping(&skeletons), "ping");
+
+destroy_xdp_redirect_map:
+	xdp_redirect_map__destroy(skeletons.xdp_redirect_maps);
+destroy_xdp_tx:
+	xdp_tx__destroy(skeletons.xdp_tx);
+destroy_xdp_dummy:
+	xdp_dummy__destroy(skeletons.xdp_dummy);
+
+	cleanup_network();
+}

-- 
2.45.2

