Message-Id: <20180516234017.172775-6-ycheng@google.com>
Date: Wed, 16 May 2018 16:40:14 -0700
From: Yuchung Cheng <ycheng@...gle.com>
To: davem@...emloft.net
Cc: netdev@...r.kernel.org, edumazet@...gle.com, ncardwell@...gle.com,
soheil@...gle.com, priyarjha@...gle.com,
Yuchung Cheng <ycheng@...gle.com>
Subject: [PATCH net-next 5/8] tcp: new helper tcp_timeout_mark_lost

Refactor using a new helper, tcp_timeout_mark_lost(), that marks packets
lost upon RTO. (An illustrative sketch of the helper's logic follows the
diffstat below.)

Signed-off-by: Yuchung Cheng <ycheng@...gle.com>
Signed-off-by: Neal Cardwell <ncardwell@...gle.com>
Reviewed-by: Eric Dumazet <edumazet@...gle.com>
Reviewed-by: Soheil Hassas Yeganeh <soheil@...gle.com>
Reviewed-by: Priyaranjan Jha <priyarjha@...gle.com>
---
net/ipv4/tcp_input.c | 50 +++++++++++++++++++++++++-------------------
1 file changed, 29 insertions(+), 21 deletions(-)
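
For reference, here is a minimal, self-contained userspace sketch of the
logic tcp_timeout_mark_lost() implements. All names in it (struct seg,
SACKED, LOST, timeout_mark_lost, the sacked_out/lost_out counters) are
illustrative stand-ins for the kernel's skb/TCP_SKB_CB machinery, the
pure-Reno bookkeeping branch (tcp_reset_reno_sack) is omitted, and the
kernel walks an rb-tree of skbs with skb_rbtree_walk_from() rather than
an array:

#include <stdbool.h>
#include <stdio.h>

#define SACKED	0x1	/* receiver reported this segment via SACK */
#define LOST	0x2	/* segment marked lost, eligible for retransmit */

struct seg {
	unsigned int seq;	/* starting sequence number */
	unsigned int flags;
};

/* Model of tcp_timeout_mark_lost(): on RTO, detect reneging (the queue
 * head was SACKed yet the receiver never cumulatively ACKed it); in that
 * case distrust and clear all SACK state, then mark segments lost.
 */
static void timeout_mark_lost(struct seg *q, int n,
			      unsigned int *sacked_out, unsigned int *lost_out)
{
	bool is_reneg = n > 0 && (q[0].flags & SACKED);
	int i;

	if (is_reneg)
		*sacked_out = 0;	/* forget all SACK information */

	for (i = 0; i < n; i++) {
		if (is_reneg)
			q[i].flags &= ~SACKED;
		/* Like the kernel's mark-lost path, skip segments that
		 * are still SACKed or already marked lost.
		 */
		if (!(q[i].flags & (SACKED | LOST))) {
			q[i].flags |= LOST;
			(*lost_out)++;
		}
	}
}

int main(void)
{
	struct seg q[] = { { 1, 0 }, { 1001, SACKED }, { 2001, 0 } };
	unsigned int sacked_out = 1, lost_out = 0;

	timeout_mark_lost(q, 3, &sacked_out, &lost_out);
	/* Head is not SACKed, so no reneging: 1001 keeps its SACK bit
	 * and only segments 1 and 2001 are marked lost.
	 */
	printf("sacked_out=%u lost_out=%u\n", sacked_out, lost_out);
	return 0;
}
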
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6fb0a28977a0..af32accda2a9 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1917,18 +1917,43 @@ static inline void tcp_init_undo(struct tcp_sock *tp)
 	tp->undo_retrans = tp->retrans_out ? : -1;
 }
 
-/* Enter Loss state. If we detect SACK reneging, forget all SACK information
+/* If we detect SACK reneging, forget all SACK information
  * and reset tags completely, otherwise preserve SACKs. If receiver
  * dropped its ofo queue, we will know this due to reneging detection.
  */
+static void tcp_timeout_mark_lost(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct sk_buff *skb;
+	bool is_reneg;			/* is receiver reneging on SACKs? */
+
+	skb = tcp_rtx_queue_head(sk);
+	is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED);
+	if (is_reneg) {
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
+		tp->sacked_out = 0;
+		/* Mark SACK reneging until we recover from this loss event. */
+		tp->is_sack_reneg = 1;
+	} else if (tcp_is_reno(tp)) {
+		tcp_reset_reno_sack(tp);
+	}
+
+	skb_rbtree_walk_from(skb) {
+		if (is_reneg)
+			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
+		tcp_mark_skb_lost(sk, skb);
+	}
+	tcp_verify_left_out(tp);
+	tcp_clear_all_retrans_hints(tp);
+}
+
+/* Enter Loss state. */
 void tcp_enter_loss(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct net *net = sock_net(sk);
-	struct sk_buff *skb;
 	bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
-	bool is_reneg;			/* is receiver reneging on SACKs? */
 
 	/* Reduce ssthresh if it has not yet been made inside this window. */
 	if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
@@ -1944,24 +1969,7 @@ void tcp_enter_loss(struct sock *sk)
 	tp->snd_cwnd_cnt   = 0;
 	tp->snd_cwnd_stamp = tcp_jiffies32;
 
-	if (tcp_is_reno(tp))
-		tcp_reset_reno_sack(tp);
-
-	skb = tcp_rtx_queue_head(sk);
-	is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED);
-	if (is_reneg) {
-		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
-		tp->sacked_out = 0;
-		/* Mark SACK reneging until we recover from this loss event. */
-		tp->is_sack_reneg = 1;
-	}
-	skb_rbtree_walk_from(skb) {
-		if (is_reneg)
-			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
-		tcp_mark_skb_lost(sk, skb);
-	}
-	tcp_verify_left_out(tp);
-	tcp_clear_all_retrans_hints(tp);
+	tcp_timeout_mark_lost(sk);
 
 	/* Timeout in disordered state after receiving substantial DUPACKs
 	 * suggests that the degree of reordering is over-estimated.
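
For context, the hunk above sits inside tcp_enter_loss()'s standard RTO
response: the visible context comment notes that ssthresh is reduced once
per window, which the kernel does through the congestion-control ops (not
shown in this hunk). As a rough, illustrative model only, the formula below
is the textbook RTO response from RFC 5681, sec. 3.1, not the kernel's
per-CA ssthresh code:

#include <stdio.h>

/* Textbook RTO response: ssthresh = max(FlightSize / 2, 2 * SMSS),
 * and cwnd restarts from the loss window of 1 SMSS.
 */
static unsigned int rto_ssthresh(unsigned int flight_size, unsigned int mss)
{
	unsigned int half = flight_size / 2;

	return half > 2 * mss ? half : 2 * mss;
}

int main(void)
{
	unsigned int mss = 1460;	/* typical Ethernet MSS */
	unsigned int flight = 10 * mss;	/* 10 segments in flight at RTO */

	printf("ssthresh=%u cwnd=%u\n", rto_ssthresh(flight, mss), 1 * mss);
	return 0;
}
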
--
2.17.0.441.gb46fe60e1d-goog