Message-ID: <20220908011022.45342-6-kuniyu@amazon.com>
Date: Wed, 7 Sep 2022 18:10:21 -0700
From: Kuniyuki Iwashima <kuniyu@...zon.com>
To: "David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
CC: Kuniyuki Iwashima <kuniyu@...zon.com>,
Kuniyuki Iwashima <kuni1840@...il.com>,
<netdev@...r.kernel.org>
Subject: [PATCH v6 net-next 5/6] tcp: Save unnecessary inet_twsk_purge() calls.

While destroying a netns, we call inet_twsk_purge() in tcp_sk_exit_batch()
and tcpv6_net_exit_batch() for AF_INET and AF_INET6.  Commands like the
following trigger the kernel to walk through the potentially big ehash
twice even though the netns has no TIME_WAIT sockets:

  # ip netns add test
  # ip netns del test

or

  # unshare -n /bin/true >/dev/null

When tw_refcount is 1, we need not call inet_twsk_purge() at least for
that net.  We can skip such unneeded iterations entirely if none of the
netns in net_exit_list has TIME_WAIT sockets.  This change eliminates
the tax imposed by the additional unshare() described in the next patch,
which is needed to guarantee the per-netns ehash size.
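
The check itself is tiny; as a sketch mirroring the tcp_minisocks.c hunk
below, the batch exit path only needs to peek at each netns's tw_refcount
before deciding whether the ehash walk is needed at all:

  /* Sketch of the skip logic (the real change is the tcp_twsk_purge()
   * hunk below): tw_refcount stays at 1 while a netns has no TIME_WAIT
   * sockets, so the ehash is walked at most once per batch and only
   * when some netns in the list still holds TIME_WAIT sockets.
   */
  void tcp_twsk_purge(struct list_head *net_exit_list, int family)
  {
          struct net *net;

          list_for_each_entry(net, net_exit_list, exit_list) {
                  if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
                          continue;       /* no TIME_WAIT sockets in this netns */

                  inet_twsk_purge(&tcp_hashinfo, family);
                  break;
          }
  }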

Tested:

  # mount -t debugfs none /sys/kernel/debug/
  # echo cleanup_net > /sys/kernel/debug/tracing/set_ftrace_filter
  # echo inet_twsk_purge >> /sys/kernel/debug/tracing/set_ftrace_filter
  # echo function > /sys/kernel/debug/tracing/current_tracer
  # cat ./add_del_unshare.sh
  for i in `seq 1 40`
  do
      (for j in `seq 1 100` ; do unshare -n /bin/true >/dev/null ; done) &
  done
  wait;
  # ./add_del_unshare.sh

Before the patch:

  # cat /sys/kernel/debug/tracing/trace_pipe
  kworker/u128:0-8 [031] ...1. 174.162765: cleanup_net <-process_one_work
  kworker/u128:0-8 [031] ...1. 174.240796: inet_twsk_purge <-cleanup_net
  kworker/u128:0-8 [032] ...1. 174.244759: inet_twsk_purge <-tcp_sk_exit_batch
  kworker/u128:0-8 [034] ...1. 174.290861: cleanup_net <-process_one_work
  kworker/u128:0-8 [039] ...1. 175.245027: inet_twsk_purge <-cleanup_net
  kworker/u128:0-8 [046] ...1. 175.290541: inet_twsk_purge <-tcp_sk_exit_batch
  kworker/u128:0-8 [037] ...1. 175.321046: cleanup_net <-process_one_work
  kworker/u128:0-8 [024] ...1. 175.941633: inet_twsk_purge <-cleanup_net
  kworker/u128:0-8 [025] ...1. 176.242539: inet_twsk_purge <-tcp_sk_exit_batch

After:

  # cat /sys/kernel/debug/tracing/trace_pipe
  kworker/u128:0-8 [038] ...1. 428.116174: cleanup_net <-process_one_work
  kworker/u128:0-8 [038] ...1. 428.262532: cleanup_net <-process_one_work
  kworker/u128:0-8 [030] ...1. 429.292645: cleanup_net <-process_one_work

Signed-off-by: Kuniyuki Iwashima <kuniyu@...zon.com>
---
 include/net/tcp.h        |  1 +
 net/ipv4/tcp_ipv4.c      |  2 +-
 net/ipv4/tcp_minisocks.c | 15 +++++++++++++++
 net/ipv6/tcp_ipv6.c      |  2 +-
 4 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/include/net/tcp.h b/include/net/tcp.h
index 735e957f7f4b..27e8d378c70a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -346,6 +346,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
 void tcp_rcv_space_adjust(struct sock *sk);
 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
 void tcp_twsk_destructor(struct sock *sk);
+void tcp_twsk_purge(struct list_head *net_exit_list, int family);
 ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
                        struct pipe_inode_info *pipe, size_t len,
                        unsigned int flags);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9aa186af6c44..73e6854ea662 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -3206,7 +3206,7 @@ static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
 {
        struct net *net;
 
-       inet_twsk_purge(&tcp_hashinfo, AF_INET);
+       tcp_twsk_purge(net_exit_list, AF_INET);
 
        list_for_each_entry(net, net_exit_list, exit_list) {
                WARN_ON_ONCE(!refcount_dec_and_test(&net->ipv4.tcp_death_row.tw_refcount));
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 2e53981252d9..98c576d4b671 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -347,6 +347,21 @@ void tcp_twsk_destructor(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
 
+void tcp_twsk_purge(struct list_head *net_exit_list, int family)
+{
+       struct net *net;
+
+       list_for_each_entry(net, net_exit_list, exit_list) {
+               /* The last refcount is decremented in tcp_sk_exit_batch() */
+               if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
+                       continue;
+
+               inet_twsk_purge(&tcp_hashinfo, family);
+               break;
+       }
+}
+EXPORT_SYMBOL_GPL(tcp_twsk_purge);
+
 /* Warning : This function is called without sk_listener being locked.
  * Be sure to read socket fields once, as their value could change under us.
  */
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f9ed47eb9b93..06beed4e23bf 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -2229,7 +2229,7 @@ static void __net_exit tcpv6_net_exit(struct net *net)
 
 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
 {
-       inet_twsk_purge(&tcp_hashinfo, AF_INET6);
+       tcp_twsk_purge(net_exit_list, AF_INET6);
 }
 
 static struct pernet_operations tcpv6_net_ops = {
--
2.30.2