Message-ID: <20250207152830.2527578-2-edumazet@google.com>
Date: Fri, 7 Feb 2025 15:28:26 +0000
From: Eric Dumazet <edumazet@...gle.com>
To: "David S . Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: netdev@...r.kernel.org, Neal Cardwell <ncardwell@...gle.com>,
Kuniyuki Iwashima <kuniyu@...zon.com>, Jason Xing <kernelxing@...cent.com>,
Simon Horman <horms@...nel.org>, eric.dumazet@...il.com,
Eric Dumazet <edumazet@...gle.com>
Subject: [PATCH net-next 1/5] tcp: remove tcp_reset_xmit_timer() @max_when argument
All callers use TCP_RTO_MAX, so we can factorize this constant into
tcp_reset_xmit_timer() itself; it will become a variable soon.
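For context, here is a rough sketch of why passing the cap everywhere was
redundant: inet_csk_reset_xmit_timer() only uses @max_when to clamp the
timeout before arming the timer. This is simplified illustration, not the
actual kernel code; the real helper also distinguishes the retransmit and
delayed-ACK timers depending on @what.

    /* Simplified sketch of the clamping performed by
     * inet_csk_reset_xmit_timer(); field names are real, the body is
     * condensed for illustration only.
     */
    static inline void inet_csk_reset_xmit_timer_sketch(struct sock *sk,
                                                        const int what,
                                                        unsigned long when,
                                                        unsigned long max_when)
    {
            struct inet_connection_sock *icsk = inet_csk(sk);

            if (when > max_when)            /* cap the timeout */
                    when = max_when;

            icsk->icsk_pending = what;
            icsk->icsk_timeout = jiffies + when;
            sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
                           icsk->icsk_timeout);
    }

Since every TCP caller passed TCP_RTO_MAX as @max_when, moving the cap into
tcp_reset_xmit_timer() means a later change only has to touch one place when
the maximum RTO stops being a compile-time constant.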
Signed-off-by: Eric Dumazet <edumazet@...gle.com>
---
include/net/tcp.h | 7 +++----
net/ipv4/tcp_input.c | 5 ++---
net/ipv4/tcp_output.c | 7 +++----
3 files changed, 8 insertions(+), 11 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5b2b04835688f65daa25ca208e29775326520e1e..356f5aa51ce22921320e34adec111fc4e412de8f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1422,11 +1422,10 @@ static inline unsigned long tcp_pacing_delay(const struct sock *sk)
static inline void tcp_reset_xmit_timer(struct sock *sk,
const int what,
- unsigned long when,
- const unsigned long max_when)
+ unsigned long when)
{
inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
- max_when);
+ TCP_RTO_MAX);
}
/* Something is really bad, we could not queue an additional packet,
@@ -1455,7 +1454,7 @@ static inline void tcp_check_probe_timer(struct sock *sk)
{
if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- tcp_probe0_base(sk), TCP_RTO_MAX);
+ tcp_probe0_base(sk));
}
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eb82e01da911048b41ca380f913ef55566be79a7..cf5cb710f202b29563de51179eaed0823aff8090 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3282,8 +3282,7 @@ void tcp_rearm_rto(struct sock *sk)
*/
rto = usecs_to_jiffies(max_t(int, delta_us, 1));
}
- tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
- TCP_RTO_MAX);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto);
}
}
@@ -3563,7 +3562,7 @@ static void tcp_ack_probe(struct sock *sk)
unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
when = tcp_clamp_probe0_to_user_timeout(sk, when);
- tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, TCP_RTO_MAX);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when);
}
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index bc95d2a5924fdc6ea609fa006432db9b13444706..93401dbf39d223a4943579786be5aa6d14e0ed8d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2911,7 +2911,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
if (rto_delta_us > 0)
timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
- tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout);
return true;
}
@@ -3545,8 +3545,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
}
if (rearm_timer)
tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- inet_csk(sk)->icsk_rto,
- TCP_RTO_MAX);
+ inet_csk(sk)->icsk_rto);
}
/* We allow to exceed memory limits for FIN packets to expedite
@@ -4402,7 +4401,7 @@ void tcp_send_probe0(struct sock *sk)
}
timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
- tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout);
}
int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
--
2.48.1.502.g6dc24dfdaf-goog