Message-ID: <20190323080542.173569-6-brakmo@fb.com>
Date: Sat, 23 Mar 2019 01:05:40 -0700
From: brakmo <brakmo@...com>
To: netdev <netdev@...r.kernel.org>
CC: Martin Lau <kafai@...com>, Alexei Starovoitov <ast@...com>,
Daniel Borkmann <daniel@...earbox.net>,
Eric Dumazet <eric.dumazet@...il.com>,
Kernel Team <Kernel-team@...com>
Subject: [PATCH bpf-next 5/7] bpf: sysctl for probe_on_drop

When a packet is dropped by the queue_xmit call in __tcp_transmit_skb
and packets_out is 0, it is beneficial to set a small probe timer.
Otherwise, throughput for the flow can suffer because it may have to
wait for the probe timer to fire before it can start sending again.
The probe timer defaults to at least 200ms; this patch instead arms it
with 20ms when a packet is dropped and there are no other packets in
flight.
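
For illustration, the decision this patch adds can be sketched as a
small stand-alone function (a user-space model only; TCP_RTO_MIN_MS
and probe_timeout_ms are stand-ins for illustration, not kernel
symbols):

/* Model of the check added to __tcp_transmit_skb(): if the local
 * transmit failed and nothing is in flight, arm the zero-window
 * probe timer with the small per-netns value instead of letting it
 * default to at least TCP_RTO_MIN (200ms).
 */
#include <stdbool.h>
#include <stdio.h>

#define TCP_RTO_MIN_MS 200	/* kernel TCP_RTO_MIN, expressed in ms */

static int probe_timeout_ms(bool xmit_failed, int packets_out,
			    int probe_on_drop_ms)
{
	if (xmit_failed && packets_out == 0 && probe_on_drop_ms > 0)
		return probe_on_drop_ms;	/* short probe after a drop */
	return TCP_RTO_MIN_MS;			/* default lower bound */
}

int main(void)
{
	/* Drop with an empty pipe: probe after 20ms instead of 200ms. */
	printf("%d\n", probe_timeout_ms(true, 0, 20));
	/* Drop with packets still in flight: default timer handling. */
	printf("%d\n", probe_timeout_ms(true, 3, 20));
	return 0;
}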
This patch introduces a new sysctl, sysctl_tcp_probe_on_drop_ms, that
specifies the duration of the probe timer for the case described above.
The allowed values are between 0 and TCP_RTO_MIN. A value of 0 disables
arming the probe timer with a small value.
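
As an example of tuning the knob from user space (assuming the patch
is applied, so the /proc/sys entry below exists; equivalent to
`sysctl -w net.ipv4.tcp_probe_on_drop_ms=20`):

/* Write the proposed sysctl directly via procfs. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_probe_on_drop_ms", "w");

	if (!f) {
		perror("fopen");	/* e.g. patch not applied, or not root */
		return 1;
	}
	/* 0 disables the short probe timer; values are capped at
	 * TCP_RTO_MIN by the ctl_table entry below.
	 */
	fprintf(f, "%d\n", 20);
	fclose(f);
	return 0;
}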
Signed-off-by: Lawrence Brakmo <brakmo@...com>
---
 include/net/netns/ipv4.h   |  1 +
 net/ipv4/sysctl_net_ipv4.c | 10 ++++++++++
 net/ipv4/tcp_ipv4.c        |  1 +
 net/ipv4/tcp_output.c      | 18 +++++++++++++++---
 4 files changed, 27 insertions(+), 3 deletions(-)

diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 104a6669e344..d5716a193883 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -165,6 +165,7 @@ struct netns_ipv4 {
int sysctl_tcp_wmem[3];
int sysctl_tcp_rmem[3];
int sysctl_tcp_comp_sack_nr;
+ int sysctl_tcp_probe_on_drop_ms;
unsigned long sysctl_tcp_comp_sack_delay_ns;
struct inet_timewait_death_row tcp_death_row;
int sysctl_max_syn_backlog;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index ba0fc4b18465..50837e66313f 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -49,6 +49,7 @@ static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
static int comp_sack_nr_max = 255;
static u32 u32_max_div_HZ = UINT_MAX / HZ;
+static int probe_on_drop_max = TCP_RTO_MIN;

/* obsolete */
static int sysctl_tcp_low_latency __read_mostly;
@@ -1219,6 +1220,15 @@ static struct ctl_table ipv4_net_table[] = {
.extra1 = &zero,
.extra2 = &comp_sack_nr_max,
},
+ {
+ .procname = "tcp_probe_on_drop_ms",
+ .data = &init_net.ipv4.sysctl_tcp_probe_on_drop_ms,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &probe_on_drop_max,
+ },
{
.procname = "udp_rmem_min",
.data = &init_net.ipv4.sysctl_udp_rmem_min,
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 277d71239d75..5aba95850d61 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2679,6 +2679,7 @@ static int __net_init tcp_sk_init(struct net *net)
spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
atomic_set(&net->ipv4.tfo_active_disable_times, 0);
+ net->ipv4.sysctl_tcp_probe_on_drop_ms = 20;

/* Reno is always built in */
if (!net_eq(net, &init_net) &&
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 4522579aaca2..95a0102fde3b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1158,9 +1158,21 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);

- if (unlikely(err > 0)) {
- tcp_enter_cwr(sk);
- err = net_xmit_eval(err);
+ if (unlikely(err)) {
+ if (unlikely(err > 0)) {
+ tcp_enter_cwr(sk);
+ err = net_xmit_eval(err);
+ }
+ /* Packet was dropped. If there are no packets out,
+ * we may need to depend on probe timer to start sending
+ * again. Hence, use a smaller value.
+ */
+ if (!tp->packets_out && !inet_csk(sk)->icsk_pending &&
+ sock_net(sk)->ipv4.sysctl_tcp_probe_on_drop_ms > 0) {
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+ msecs_to_jiffies(sock_net(sk)->ipv4.sysctl_tcp_probe_on_drop_ms),
+ TCP_RTO_MAX, NULL);
+ }
}
if (!err && oskb) {
tcp_update_skb_after_send(sk, oskb, prior_wstamp);
tcp_rate_skb_sent(sk, oskb);
--
2.17.1