Message-Id: <20250317120314.41404-3-kerneljasonxing@gmail.com>
Date: Mon, 17 Mar 2025 20:03:14 +0800
From: Jason Xing <kerneljasonxing@...il.com>
To: davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
horms@...nel.org,
ncardwell@...gle.com,
kuniyu@...zon.com,
dsahern@...nel.org
Cc: netdev@...r.kernel.org,
Jason Xing <kerneljasonxing@...il.com>
Subject: [PATCH net-next v4 2/2] tcp: support TCP_DELACK_MAX_US for set/getsockopt use
Support adjusting/reading the delayed ack max at the socket level via
set/getsockopt().
This option aligns with TCP_BPF_DELACK_MAX usage. Since that bpf
option was implemented before this patch, a standalone new option is
needed for plain tcp set/getsockopt() use.
Add WRITE_ONCE()/READ_ONCE() to prevent a data race when setsockopt()
writes a value to icsk_delack_max while icsk_delack_max is being read.
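For reference, a minimal userspace sketch (not part of the patch) of how
the new option could be exercised, assuming a kernel carrying this series;
the #ifndef fallback mirrors the value added to include/uapi/linux/tcp.h
below in case installed headers do not yet define it:

#include <stdio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef TCP_DELACK_MAX_US
#define TCP_DELACK_MAX_US 46
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int val = 20000;	/* 20 ms; must fall in the kernel's accepted range */
	socklen_t len = sizeof(val);

	if (fd < 0)
		return 1;
	/* Set the per-socket delayed-ACK maximum in microseconds. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_DELACK_MAX_US, &val, sizeof(val)))
		perror("setsockopt TCP_DELACK_MAX_US");
	/* Read it back; the kernel stores it in jiffies, so the value
	 * returned may be rounded relative to what was written.
	 */
	if (getsockopt(fd, IPPROTO_TCP, TCP_DELACK_MAX_US, &val, &len))
		perror("getsockopt TCP_DELACK_MAX_US");
	else
		printf("delack max: %d us\n", val);
	return 0;
}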
Signed-off-by: Jason Xing <kerneljasonxing@...il.com>
---
include/uapi/linux/tcp.h | 1 +
net/ipv4/tcp.c | 13 ++++++++++++-
net/ipv4/tcp_output.c | 2 +-
3 files changed, 14 insertions(+), 2 deletions(-)
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index b2476cf7058e..2377e22f2c4b 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -138,6 +138,7 @@ enum {
#define TCP_IS_MPTCP 43 /* Is MPTCP being used? */
#define TCP_RTO_MAX_MS 44 /* max rto time in ms */
#define TCP_RTO_MIN_US 45 /* min rto time in us */
+#define TCP_DELACK_MAX_US 46 /* max delayed ack time in us */
#define TCP_REPAIR_ON 1
#define TCP_REPAIR_OFF 0
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b89c1b676b8e..578e79024955 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3353,7 +3353,7 @@ int tcp_disconnect(struct sock *sk, int flags)
icsk->icsk_probes_tstamp = 0;
icsk->icsk_rto = TCP_TIMEOUT_INIT;
WRITE_ONCE(icsk->icsk_rto_min, TCP_RTO_MIN);
- icsk->icsk_delack_max = TCP_DELACK_MAX;
+ WRITE_ONCE(icsk->icsk_delack_max, TCP_DELACK_MAX);
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
tp->snd_cwnd_cnt = 0;
@@ -3841,6 +3841,14 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
WRITE_ONCE(inet_csk(sk)->icsk_rto_min, rto_min);
return 0;
}
+ case TCP_DELACK_MAX_US: {
+ int delack_max = usecs_to_jiffies(val);
+
+ if (delack_max > TCP_DELACK_MAX || delack_max < TCP_TIMEOUT_MIN)
+ return -EINVAL;
+ WRITE_ONCE(inet_csk(sk)->icsk_delack_max, delack_max);
+ return 0;
+ }
}
sockopt_lock_sock(sk);
@@ -4683,6 +4691,9 @@ int do_tcp_getsockopt(struct sock *sk, int level,
case TCP_RTO_MIN_US:
val = jiffies_to_usecs(READ_ONCE(inet_csk(sk)->icsk_rto_min));
break;
+ case TCP_DELACK_MAX_US:
+ val = jiffies_to_usecs(READ_ONCE(inet_csk(sk)->icsk_delack_max));
+ break;
default:
return -ENOPROTOOPT;
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 24e56bf96747..65aa26d65987 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -4179,7 +4179,7 @@ u32 tcp_delack_max(const struct sock *sk)
{
u32 delack_from_rto_min = max(tcp_rto_min(sk), 2) - 1;
- return min(inet_csk(sk)->icsk_delack_max, delack_from_rto_min);
+ return min(READ_ONCE(inet_csk(sk)->icsk_delack_max), delack_from_rto_min);
}
/* Send out a delayed ack, the caller does the policy checking
--
2.43.5