lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20240913150913.1280238-4-sdf@fomichev.me>
Date: Fri, 13 Sep 2024 08:09:12 -0700
From: Stanislav Fomichev <sdf@...ichev.me>
To: netdev@...r.kernel.org
Cc: davem@...emloft.net,
	edumazet@...gle.com,
	kuba@...nel.org,
	pabeni@...hat.com,
	Mina Almasry <almasrymina@...gle.com>
Subject: [RFC PATCH net-next v1 3/4] selftests: ncdevmem: Implement loopback mode

In loopback mode, start both the client and the server on
the same IP and share the dmabuf. The kernel will forward
the packets back to the receiving socket. Note that in
this mode, the server doesn't exercise bind-rx and
flow steering. The driver's TX and RX paths are also bypassed.
IOW, we are testing only the UAPI and the TCP stack.

Running with the following:
  dev=eth0
  addr=192.168.1.4

  ip addr add $addr dev $dev
  ip link set $dev up
  ret=$(echo -e "hello\nworld" | ./tools/testing/selftests/drivers/net/ncdevmem -L -f $dev -s ::ffff:$addr -p 5201)
  echo "[$ret]"

  want=$(echo -e "hello\nworld")
  if [ "$ret" != "$want" ]; then
          echo "FAIL!"
          exit 1
  fi

Outputs:
  using queues 0..1
  binding to address ::ffff:192.168.1.4:5201
  Waiting or connection on ::ffff:192.168.1.4:5201
  got tx dmabuf id=1
  Connect to ::ffff:192.168.1.4 5201 (via eth0)
  Got connection from ::ffff:192.168.1.4:54040
  DEBUG: read line_size=6
  DEBUG: sendmsg_ret=6
  recvmsg ret=6
  received frag_page=0, in_page_offset=0, frag_offset=0, frag_size=6, token=1, total_received=6, dmabuf_id=1
  total_received=6
  tx complete [0,0]
  DEBUG: read line_size=6
  DEBUG: sendmsg_ret=6
  recvmsg ret=6
  received frag_page=0, in_page_offset=6, frag_offset=6, frag_size=6, token=1, total_received=12, dmabuf_id=1
  total_received=12
  tx complete [1,1]
  ncdevmem: tx ok
  recvmsg ret=0
  client exited
  ncdevmem: ok
  page_aligned_frags=0, non_page_aligned_frags=2
  [hello
  world]

Cc: Mina Almasry <almasrymina@...gle.com>
Signed-off-by: Stanislav Fomichev <sdf@...ichev.me>
---
 .../testing/selftests/drivers/net/ncdevmem.c  | 97 +++++++++++++------
 1 file changed, 67 insertions(+), 30 deletions(-)

diff --git a/tools/testing/selftests/drivers/net/ncdevmem.c b/tools/testing/selftests/drivers/net/ncdevmem.c
index 4e0dbe2e515b..615818cf5349 100644
--- a/tools/testing/selftests/drivers/net/ncdevmem.c
+++ b/tools/testing/selftests/drivers/net/ncdevmem.c
@@ -12,6 +12,7 @@
 #define __iovec_defined
 #include <fcntl.h>
 #include <malloc.h>
+#include <pthread.h>
 #include <error.h>
 
 #include <arpa/inet.h>
@@ -52,6 +53,9 @@ static char *ifname;
 static unsigned int ifindex;
 static unsigned int dmabuf_id;
 static unsigned int tx_dmabuf_id;
+static bool loopback;
+static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
 
 struct memory_buffer {
 	int fd;
@@ -358,6 +362,8 @@ static int bind_tx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
 
 	fprintf(stderr, "got tx dmabuf id=%d\n", rsp->id);
 	tx_dmabuf_id = rsp->id;
+	if (loopback)
+		dmabuf_id = tx_dmabuf_id;
 
 	netdev_bind_tx_req_free(req);
 	netdev_bind_tx_rsp_free(rsp);
@@ -409,11 +415,11 @@ static int do_server(struct memory_buffer *mem)
 	struct sockaddr_in6 client_addr;
 	struct sockaddr_in6 server_sin;
 	size_t page_aligned_frags = 0;
+	struct ynl_sock *ys = NULL;
 	size_t total_received = 0;
 	socklen_t client_addr_len;
 	bool is_devmem = false;
 	char *tmp_mem = NULL;
-	struct ynl_sock *ys;
 	char iobuf[819200];
 	char buffer[256];
 	int socket_fd;
@@ -425,33 +431,33 @@ static int do_server(struct memory_buffer *mem)
 	if (ret < 0)
 		error(1, 0, "parse server address");
 
-	if (reset_flow_steering())
-		error(1, 0, "Failed to reset flow steering\n");
+	if (!loopback) {
+		if (reset_flow_steering())
+			error(1, 0, "Failed to reset flow steering\n");
 
-	if (configure_headersplit(1))
-		error(1, 0, "Failed to enable TCP header split\n");
+		if (configure_headersplit(1))
+			error(1, 0, "Failed to enable TCP header split\n");
 
-	/* Configure RSS to divert all traffic from our devmem queues */
-	if (configure_rss())
-		error(1, 0, "Failed to configure rss\n");
+		if (configure_rss())
+			error(1, 0, "Failed to configure rss\n");
 
-	/* Flow steer our devmem flows to start_queue */
-	if (configure_flow_steering(&server_sin))
-		error(1, 0, "Failed to configure flow steering\n");
+		if (configure_flow_steering(&server_sin))
+			error(1, 0, "Failed to configure flow steering\n");
 
-	sleep(1);
+		sleep(1);
 
-	queues = malloc(sizeof(*queues) * num_queues);
+		queues = malloc(sizeof(*queues) * num_queues);
 
-	for (i = 0; i < num_queues; i++) {
-		queues[i]._present.type = 1;
-		queues[i]._present.id = 1;
-		queues[i].type = NETDEV_QUEUE_TYPE_RX;
-		queues[i].id = start_queue + i;
-	}
+		for (i = 0; i < num_queues; i++) {
+			queues[i]._present.type = 1;
+			queues[i]._present.id = 1;
+			queues[i].type = NETDEV_QUEUE_TYPE_RX;
+			queues[i].id = start_queue + i;
+		}
 
-	if (bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
-		error(1, 0, "Failed to bind\n");
+		if (bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
+			error(1, 0, "Failed to bind\n");
+	}
 
 	tmp_mem = malloc(mem->size);
 	if (!tmp_mem)
@@ -478,6 +484,12 @@ static int do_server(struct memory_buffer *mem)
 
 	client_addr_len = sizeof(client_addr);
 
+	if (loopback) {
+		pthread_mutex_lock(&mutex);
+		pthread_cond_signal(&cond);
+		pthread_mutex_unlock(&mutex);
+	}
+
 	inet_ntop(AF_INET6, &server_sin.sin6_addr, buffer,
 		  sizeof(buffer));
 	fprintf(stderr, "Waiting or connection on %s:%d\n", buffer,
@@ -514,7 +526,7 @@ static int do_server(struct memory_buffer *mem)
 		}
 		if (ret == 0) {
 			fprintf(stderr, "client exited\n");
-			goto cleanup;
+			break;
 		}
 
 		i++;
@@ -585,15 +597,11 @@ static int do_server(struct memory_buffer *mem)
 	fprintf(stderr, "page_aligned_frags=%lu, non_page_aligned_frags=%lu\n",
 		page_aligned_frags, non_page_aligned_frags);
 
-	fprintf(stderr, "page_aligned_frags=%lu, non_page_aligned_frags=%lu\n",
-		page_aligned_frags, non_page_aligned_frags);
-
-cleanup:
-
 	free(tmp_mem);
 	close(client_fd);
 	close(socket_fd);
-	ynl_sock_destroy(ys);
+	if (ys)
+		ynl_sock_destroy(ys);
 
 	return 0;
 }
@@ -818,6 +826,15 @@ static int do_client(struct memory_buffer *mem)
 	return 0;
 }
 
+static void *server_thread(void *data)
+{
+	struct memory_buffer *mem = data;
+
+	do_server(mem);
+
+	return (void *)NULL;
+}
+
 int main(int argc, char *argv[])
 {
 	struct memory_buffer *mem;
@@ -825,11 +842,14 @@ int main(int argc, char *argv[])
 	int probe = 0;
 	int ret;
 
-	while ((opt = getopt(argc, argv, "ls:c:p:q:t:f:P")) != -1) {
+	while ((opt = getopt(argc, argv, "Lls:c:p:q:t:f:P")) != -1) {
 		switch (opt) {
 		case 'l':
 			is_server = 1;
 			break;
+		case 'L':
+			loopback = true;
+			break;
 		case 's':
 			server_ip = optarg;
 			break;
@@ -883,7 +903,24 @@ int main(int argc, char *argv[])
 	}
 
 	mem = provider->alloc(getpagesize() * NUM_PAGES);
-	ret = is_server ? do_server(mem) : do_client(mem);
+	if (loopback) {
+		pthread_t thread;
+		int rc;
+
+		rc = pthread_create(&thread, NULL, server_thread, mem);
+		if (rc != 0)
+			error(-1, -errno, "pthread_create failed");
+
+		pthread_mutex_lock(&mutex);
+		pthread_cond_wait(&cond, &mutex);
+		pthread_mutex_unlock(&mutex);
+
+		ret = do_client(mem);
+
+		pthread_join(thread, NULL);
+	} else {
+		ret = is_server ? do_server(mem) : do_client(mem);
+	}
 	provider->free(mem);
 
 	return ret;
-- 
2.46.0


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ