Message-ID: <20180702213908.1246455-3-brakmo@fb.com>
Date: Mon, 2 Jul 2018 14:39:08 -0700
From: Lawrence Brakmo <brakmo@...com>
To: netdev <netdev@...r.kernel.org>
CC: Kernel Team <kernel-team@...com>, Blake Matheny <bmatheny@...com>,
Alexei Starovoitov <ast@...com>,
Neal Cardwell <ncardwell@...gle.com>,
Yuchung Cheng <ycheng@...gle.com>,
Steve Ibanez <sibanez@...nford.edu>,
Eric Dumazet <eric.dumazet@...il.com>
Subject: [PATCH net-next v2 2/2] tcp: ack immediately when a cwr packet arrives

We observed high 99th and 99.9th percentile latencies when doing RPCs with
DCTCP. The problem is triggered when the last packet of a request arrives
CE marked. The reply will carry the ECE mark, causing TCP to shrink its
cwnd to 1 (because there are no packets in flight). When the 1st packet of
the next request arrives, its ACK is sometimes delayed even though the
packet is CWR marked, adding up to 40ms to the RPC latency.

This patch ensures that arriving CWR-marked data packets are acked
immediately.
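
For illustration only (not part of the patch): a minimal user-space model
of the receiver's delayed-ACK decision, showing the behavioral change the
first hunk below makes. The rx_state struct, the helper names and the 40ms
constant are simplifications for the sketch, not kernel code.

/* Hypothetical model of the delayed-ACK decision; illustrative only. */
#include <stdbool.h>
#include <stdio.h>

#define DELACK_MIN_MS 40	/* minimum delayed-ACK timeout on Linux */

struct rx_state {
	int quickack_pkts;	/* ACK immediately while > 0 */
};

/* The patch's idea: a CWR-marked segment means the sender's cwnd may
 * be as small as 1 packet, so grant immediate-ACK credits.
 */
static void segment_arrived(struct rx_state *s, bool cwr)
{
	if (cwr)
		s->quickack_pkts = 2;	/* cf. tcp_enter_quickack_mode(sk, 2) */
}

static int ack_delay_ms(struct rx_state *s)
{
	if (s->quickack_pkts > 0) {
		s->quickack_pkts--;
		return 0;		/* ACK now */
	}
	return DELACK_MIN_MS;		/* delayed ACK, up to 40ms later */
}

int main(void)
{
	struct rx_state s = { 0 };

	segment_arrived(&s, false);
	printf("plain data: ACK delayed %d ms\n", ack_delay_ms(&s));

	segment_arrived(&s, true);
	printf("CWR marked: ACK delayed %d ms\n", ack_delay_ms(&s));
	return 0;
}

Compiled as-is, the model reports a 40ms delay for a plain data segment
and an immediate ACK once a CWR-marked segment has been seen, which is
what calling tcp_enter_quickack_mode() from __tcp_ecn_check() achieves.
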
Modified based on comments by Neal Cardwell <ncardwell@...gle.com>
Signed-off-by: Lawrence Brakmo <brakmo@...com>
---
 net/ipv4/tcp_input.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 76ca88f63b70..6fd1f2378f6c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -254,10 +254,16 @@ static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
 	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
 }
 
-static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
+static void __tcp_ecn_check(struct sock *sk, const struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
+	/* If the sender is telling us it has entered CWR, then its cwnd may be
+	 * very low (even just 1 packet), so we should ACK immediately.
+	 */
+	if (tcp_hdr(skb)->cwr)
+		tcp_enter_quickack_mode(sk, 2);
+
 	switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
 	case INET_ECN_NOT_ECT:
 		/* Funny extension: if ECT is not set on a segment,
@@ -286,10 +292,10 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
 	}
 }
 
-static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
+static void tcp_ecn_check(struct sock *sk, const struct sk_buff *skb)
 {
 	if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK)
-		__tcp_ecn_check_ce(sk, skb);
+		__tcp_ecn_check(sk, skb);
 }
 
 static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
@@ -715,7 +721,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 	}
 	icsk->icsk_ack.lrcvtime = now;
 
-	tcp_ecn_check_ce(sk, skb);
+	tcp_ecn_check(sk, skb);
 
 	if (skb->len >= 128)
 		tcp_grow_window(sk, skb);
@@ -4439,7 +4445,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	u32 seq, end_seq;
 	bool fragstolen;
 
-	tcp_ecn_check_ce(sk, skb);
+	tcp_ecn_check(sk, skb);
 
 	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
--
2.17.1