Message-ID: <20260204055147.1682705-2-edumazet@google.com>
Date: Wed, 4 Feb 2026 05:51:44 +0000
From: Eric Dumazet <edumazet@...gle.com>
To: "David S . Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: Simon Horman <horms@...nel.org>, Kuniyuki Iwashima <kuniyu@...gle.com>, netdev@...r.kernel.org,
eric.dumazet@...il.com, Eric Dumazet <edumazet@...gle.com>
Subject: [PATCH v2 net-next 1/4] inet: move reqsk_queue_alloc() to net/ipv4/inet_connection_sock.c

reqsk_queue_alloc() is only called from inet_csk_listen_start(),
so move it next to its single caller and make it static.

Signed-off-by: Eric Dumazet <edumazet@...gle.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@...gle.com>
---
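Note for readers (not part of the commit message): a minimal,
self-contained user-space sketch of the pattern this patch relies on,
namely a helper with a single caller becoming file-local. The
structures below are simplified stand-ins that mirror only the fields
reqsk_queue_alloc() touches, and listen_start() is a hypothetical
stand-in for inet_csk_listen_start(); this is an illustration, not the
kernel code:

#include <stdio.h>

/* Simplified stand-ins for the kernel structures: only the fields
 * that reqsk_queue_alloc() initialises are mirrored here.
 */
struct fastopen_queue {
	void *rskq_rst_head;
	void *rskq_rst_tail;
	int qlen;
};

struct request_sock_queue {
	struct fastopen_queue fastopenq;
	void *rskq_accept_head;
};

/* Single caller below, so the helper can be file-local (static). */
static void reqsk_queue_alloc(struct request_sock_queue *queue)
{
	queue->fastopenq.rskq_rst_head = NULL;
	queue->fastopenq.rskq_rst_tail = NULL;
	queue->fastopenq.qlen = 0;

	queue->rskq_accept_head = NULL;
}

/* Hypothetical stand-in for inet_csk_listen_start(): the accept queue
 * is initialised before the socket starts accepting connections.
 */
static int listen_start(struct request_sock_queue *accept_queue)
{
	reqsk_queue_alloc(accept_queue);
	return 0;
}

int main(void)
{
	struct request_sock_queue q;

	listen_start(&q);
	printf("fastopen qlen after init: %d\n", q.fastopenq.qlen);
	return 0;
}

Builds with any C compiler; it only shows the accept-queue fields
being reset to NULL/0 before listening starts.
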
 include/net/request_sock.h     |  2 --
 net/core/request_sock.c        | 24 ------------------------
 net/ipv4/inet_connection_sock.c |  9 +++++++++
 3 files changed, 9 insertions(+), 26 deletions(-)

diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 9b9e04f6bb8931088d9bb2fa7d4420fecf235895..23bb909771fbacbbd31f614b1754f0c24e602645 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -196,8 +196,6 @@ struct request_sock_queue {
 					     */
 };
 
-void reqsk_queue_alloc(struct request_sock_queue *queue);
-
 void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
 			   bool reset);
 
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 897a8f01a67b4e09197da2241bc5c33ea3d90c29..31389f875b19b56592e81576b8812359017daac9 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -16,30 +16,6 @@
 
 #include <net/request_sock.h>
 
-/*
- * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
- * One SYN_RECV socket costs about 80bytes on a 32bit machine.
- * It would be better to replace it with a global counter for all sockets
- * but then some measure against one socket starving all other sockets
- * would be needed.
- *
- * The minimum value of it is 128. Experiments with real servers show that
- * it is absolutely not enough even at 100conn/sec. 256 cures most
- * of problems.
- * This value is adjusted to 128 for low memory machines,
- * and it will increase in proportion to the memory of machine.
- * Note : Dont forget somaxconn that may limit backlog too.
- */
-
-void reqsk_queue_alloc(struct request_sock_queue *queue)
-{
-	queue->fastopenq.rskq_rst_head = NULL;
-	queue->fastopenq.rskq_rst_tail = NULL;
-	queue->fastopenq.qlen = 0;
-
-	queue->rskq_accept_head = NULL;
-}
-
 /*
  * This function is called to set a Fast Open socket's "fastopen_rsk" field
  * to NULL when a TFO socket no longer needs to access the request_sock.
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 018e8ffc07176dd4f6ecbaae5697b90b67cb1294..60f2ee039c05f46bdb95f6211a8f9eafbedb9610 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -1314,6 +1314,15 @@ static int inet_ulp_can_listen(const struct sock *sk)
 	return 0;
 }
 
+static void reqsk_queue_alloc(struct request_sock_queue *queue)
+{
+	queue->fastopenq.rskq_rst_head = NULL;
+	queue->fastopenq.rskq_rst_tail = NULL;
+	queue->fastopenq.qlen = 0;
+
+	queue->rskq_accept_head = NULL;
+}
+
 int inet_csk_listen_start(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
--
2.53.0.rc2.204.g2597b5adb4-goog