Message-ID: <20221011152119.89895-1-kuniyu@amazon.com>
Date: Tue, 11 Oct 2022 08:21:19 -0700
From: Kuniyuki Iwashima <kuniyu@...zon.com>
To: <pabeni@...hat.com>
CC: <davem@...emloft.net>, <dsahern@...nel.org>, <edumazet@...gle.com>,
<kraig@...gle.com>, <kuba@...nel.org>, <kuni1840@...il.com>,
<kuniyu@...zon.com>, <linux-kernel@...r.kernel.org>,
<martin.lau@...nel.org>, <netdev@...r.kernel.org>,
<willemb@...gle.com>, <yoshfuji@...ux-ipv6.org>
Subject: Re: [PATCH v1 net 3/3] selftest: Add test for SO_INCOMING_CPU.
From: Paolo Abeni <pabeni@...hat.com>
Date: Tue, 11 Oct 2022 13:34:58 +0200
> On Mon, 2022-10-10 at 10:43 -0700, Kuniyuki Iwashima wrote:
> > Some highly optimised applications use SO_INCOMING_CPU for
> > efficiency, but they do not verify with getsockopt() that it is
> > working correctly, to avoid the extra syscall overhead. As a result,
> > no one noticed it had been broken for years, so now is a good time
> > to add a test to catch future regressions.
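> >
> > For illustration, the check that such applications skip is a single
> > getsockopt() call per accepted socket, roughly like the fragment
> > below (a minimal sketch; expected_cpu is a placeholder for whatever
> > CPU the application assigned to the listener):
> >
> >     int cpu;
> >     socklen_t len = sizeof(cpu);
> >
> >     if (getsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len) ||
> >         cpu != expected_cpu)
> >             fprintf(stderr, "SO_INCOMING_CPU is not taking effect\n");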
> >
> > The test does
> >
> > 1) Create $(nproc) TCP listeners associated with each CPU.
> >
> > 2) Pin the process to each CPU in turn with sched_setaffinity()
> > and create 32 client sockets per listener.
> >
> > 3) Check that each accept()ed socket's sk_incoming_cpu matches
> > its listener's.
> >
> > If accept() fails with -EAGAIN, SO_INCOMING_CPU is broken. However,
> > we might not see any error even when it is broken; the kernel could
> > happen to distribute every SYN to the correct listener. To make that
> > unlikely, we need a reasonably large number of clients and CPUs, so
> > the test requires $(nproc) >= 2 and creates at least 64 sockets.
> >
> > Test:
> > $ nproc
> > 96
> > $ ./so_incoming_cpu
> >
> > Before the previous patch:
> >
> > # Starting 1 tests from 2 test cases.
> > # RUN so_incoming_cpu.test1 ...
> > # so_incoming_cpu.c:129:test1:Expected cpu (82) == i (0)
> > # test1: Test terminated by assertion
> > # FAIL so_incoming_cpu.test1
> > not ok 1 so_incoming_cpu.test1
> > # FAILED: 0 / 1 tests passed.
> > # Totals: pass:0 fail:1 xfail:0 xpass:0 skip:0 error:0
> >
> > After:
> >
> > # Starting 1 tests from 2 test cases.
> > # RUN so_incoming_cpu.test1 ...
> > # so_incoming_cpu.c:137:test1:SO_INCOMING_CPU is very likely to be working correctly with 3072 sockets.
> > # OK so_incoming_cpu.test1
> > ok 1 so_incoming_cpu.test1
> > # PASSED: 1 / 1 tests passed.
> > # Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
> >
> > Signed-off-by: Kuniyuki Iwashima <kuniyu@...zon.com>
> > ---
> > tools/testing/selftests/net/.gitignore | 1 +
> > tools/testing/selftests/net/Makefile | 1 +
> > tools/testing/selftests/net/so_incoming_cpu.c | 148 ++++++++++++++++++
> > 3 files changed, 150 insertions(+)
> > create mode 100644 tools/testing/selftests/net/so_incoming_cpu.c
> >
> > diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
> > index 3d7adee7a3e6..ff8807cc9c2e 100644
> > --- a/tools/testing/selftests/net/.gitignore
> > +++ b/tools/testing/selftests/net/.gitignore
> > @@ -25,6 +25,7 @@ rxtimestamp
> > sk_bind_sendto_listen
> > sk_connect_zero_addr
> > socket
> > +so_incoming_cpu
> > so_netns_cookie
> > so_txtime
> > stress_reuseport_listen
> > diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
> > index 2a6b0bc648c4..ba57e7e7dc86 100644
> > --- a/tools/testing/selftests/net/Makefile
> > +++ b/tools/testing/selftests/net/Makefile
> > @@ -70,6 +70,7 @@ TEST_PROGS += io_uring_zerocopy_tx.sh
> > TEST_GEN_FILES += bind_bhash
> > TEST_GEN_PROGS += sk_bind_sendto_listen
> > TEST_GEN_PROGS += sk_connect_zero_addr
> > +TEST_GEN_PROGS += so_incoming_cpu
> >
> > TEST_FILES := settings
> >
> > diff --git a/tools/testing/selftests/net/so_incoming_cpu.c b/tools/testing/selftests/net/so_incoming_cpu.c
> > new file mode 100644
> > index 000000000000..0ee0f2e393eb
> > --- /dev/null
> > +++ b/tools/testing/selftests/net/so_incoming_cpu.c
> > @@ -0,0 +1,148 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +/* Copyright Amazon.com Inc. or its affiliates. */
> > +#define _GNU_SOURCE
> > +#include <sched.h>
> > +
> > +#include <netinet/in.h>
> > +#include <sys/socket.h>
> > +#include <sys/sysinfo.h>
> > +
> > +#include "../kselftest_harness.h"
> > +
> > +#define CLIENT_PER_SERVER 32 /* More sockets, more reliable */
> > +#define NR_SERVER self->nproc
> > +#define NR_CLIENT (CLIENT_PER_SERVER * NR_SERVER)
> > +
> > +FIXTURE(so_incoming_cpu)
> > +{
> > + int nproc;
> > + int *servers;
> > + union {
> > + struct sockaddr addr;
> > + struct sockaddr_in in_addr;
> > + };
> > + socklen_t addrlen;
> > +};
> > +
> > +FIXTURE_SETUP(so_incoming_cpu)
> > +{
> > + self->nproc = get_nprocs();
> > + ASSERT_LE(2, self->nproc);
> > +
> > + self->servers = malloc(sizeof(int) * NR_SERVER);
> > + ASSERT_NE(self->servers, NULL);
> > +
> > + self->in_addr.sin_family = AF_INET;
> > + self->in_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
> > + self->in_addr.sin_port = htons(0);
> > + self->addrlen = sizeof(struct sockaddr_in);
> > +}
> > +
> > +FIXTURE_TEARDOWN(so_incoming_cpu)
> > +{
> > + int i;
> > +
> > + for (i = 0; i < NR_SERVER; i++)
> > + close(self->servers[i]);
> > +
> > + free(self->servers);
> > +}
> > +
> > +void create_servers(struct __test_metadata *_metadata,
> > + FIXTURE_DATA(so_incoming_cpu) *self)
> > +{
> > + int i, fd, ret;
> > +
> > + for (i = 0; i < NR_SERVER; i++) {
> > + fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
> > + ASSERT_NE(fd, -1);
> > +
> > + ret = setsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &i, sizeof(int));
> > + ASSERT_EQ(ret, 0);
> > +
> > + ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &(int){1}, sizeof(int));
> > + ASSERT_EQ(ret, 0);
> > +
> > + ret = bind(fd, &self->addr, self->addrlen);
> > + ASSERT_EQ(ret, 0);
> > +
> > + if (i == 0) {
> > + ret = getsockname(fd, &self->addr, &self->addrlen);
> > + ASSERT_EQ(ret, 0);
> > + }
> > +
> > + /* Use NR_CLIENT as the backlog, not CLIENT_PER_SERVER, so that
> > + * connect() does not block even if SO_INCOMING_CPU is broken
> > + * and all clients end up queued to a single listener.
> > + */
> > + ret = listen(fd, NR_CLIENT);
> > + ASSERT_EQ(ret, 0);
> > +
> > + self->servers[i] = fd;
> > + }
> > +}
> > +
> > +void create_clients(struct __test_metadata *_metadata,
> > + FIXTURE_DATA(so_incoming_cpu) *self)
> > +{
> > + cpu_set_t cpu_set;
> > + int i, j, fd, ret;
> > +
> > + for (i = 0; i < NR_SERVER; i++) {
> > + CPU_ZERO(&cpu_set);
> > +
> > + CPU_SET(i, &cpu_set);
> > + ASSERT_EQ(CPU_COUNT(&cpu_set), 1);
> > + ASSERT_NE(CPU_ISSET(i, &cpu_set), 0);
> > +
> > + /* Make sure SYN will be processed on the i-th CPU
> > + * and finally distributed to the i-th listener.
> > + */
> > + ret = sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
> > + ASSERT_EQ(ret, 0);
> > +
> > + for (j = 0; j < CLIENT_PER_SERVER; j++) {
> > + fd = socket(AF_INET, SOCK_STREAM, 0);
> > + ASSERT_NE(fd, -1);
> > +
> > + ret = connect(fd, &self->addr, self->addrlen);
> > + ASSERT_EQ(ret, 0);
> > +
> > + close(fd);
> > + }
> > + }
> > +}
> > +
> > +void verify_incoming_cpu(struct __test_metadata *_metadata,
> > + FIXTURE_DATA(so_incoming_cpu) *self)
> > +{
> > + int i, j, fd, cpu, ret, total = 0;
> > + socklen_t len = sizeof(int);
> > +
> > + for (i = 0; i < NR_SERVER; i++) {
> > + for (j = 0; j < CLIENT_PER_SERVER; j++) {
> > + /* If we see -EAGAIN here, SO_INCOMING_CPU is broken */
> > + fd = accept(self->servers[i], &self->addr, &self->addrlen);
> > + ASSERT_NE(fd, -1);
> > +
> > + ret = getsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len);
> > + ASSERT_EQ(ret, 0);
> > + ASSERT_EQ(cpu, i);
> > +
> > + close(fd);
> > + total++;
> > + }
> > + }
> > +
> > + ASSERT_EQ(total, NR_CLIENT);
> > + TH_LOG("SO_INCOMING_CPU is very likely to be "
> > + "working correctly with %d sockets.", total);
> > +}
> > +
> > +TEST_F(so_incoming_cpu, test1)
> > +{
> > + create_servers(_metadata, self);
> > + create_clients(_metadata, self);
> > + verify_incoming_cpu(_metadata, self);
> > +}
>
> I think it would be nicer if you could add more test cases, covering
> e.g.:
> - setting SO_INCOMING_CPU after SO_REUSEPORT,
> - initially including a socket without SO_INCOMING_CPU in the
>   soreuseport set and then removing it
I agree with that; I actually tested those cases with a Python script :)
I'll add them to the selftest in the next spin.
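
For the first case, something like the following might work (a rough,
untested sketch; the helper name is made up and error handling is
trimmed):

    static int create_server_reuseport_first(struct sockaddr *addr,
					     socklen_t addrlen, int cpu)
    {
	    int fd, one = 1;

	    fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
	    if (fd == -1)
		    return -1;

	    /* Reverse of create_servers(): SO_REUSEPORT first,
	     * then SO_INCOMING_CPU.
	     */
	    if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) ||
		setsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, sizeof(cpu)) ||
		bind(fd, addr, addrlen) ||
		listen(fd, CLIENT_PER_SERVER * get_nprocs())) {
		    close(fd);
		    return -1;
	    }

	    return fd;
    }

The second case would need one listener created without
SO_INCOMING_CPU, closed after the first round of clients, and the
remaining listeners verified again.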
Thank you.