Message-ID: <152751766681.30935.2254734266951156690.stgit@alrua-kau>
Date: Mon, 28 May 2018 16:27:46 +0200
From: Toke Høiland-Jørgensen <toke@...e.dk>
To: netdev@...r.kernel.org, cake@...ts.bufferbloat.net
Cc: Yuchung Cheng <ycheng@...gle.com>,
Neal Cardwell <ncardwell@...gle.com>
Subject: [PATCH net-next v16 3/8] sch_cake: Add optional ACK filter

The ACK filter is an optional feature of CAKE which is designed to improve
performance on links with very asymmetrical rate limits. On such links
(which are unfortunately quite prevalent, especially for DSL and cable
subscribers), the downstream throughput can be limited by the number of
ACKs capable of being transmitted in the *upstream* direction.
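
As a rough, illustrative calculation (assuming ~40-byte pure ACKs on the
wire, 1500-byte data segments and delayed ACKs covering two segments each;
real link-layer framing overhead only makes this worse):

    1 Mbit/s up = ~125,000 bytes/s  =>  ~3,100 pure ACKs/s
    ~3,100 ACKs/s * 2 * 1,500 bytes = ~9.4 Mbyte/s = ~75 Mbit/s ceiling

Once the downstream rate approaches that ceiling, or the upstream carries
competing traffic so ACKs only get a fraction of it, ACKs start to queue
up and downstream throughput suffers.
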
Filtering ACKs can, in general, have adverse effects on TCP performance
because it interferes with ACK clocking (especially in slow start), and it
reduces the flow's resiliency to ACKs being dropped further along the path.

To alleviate these drawbacks, the ACK filter in CAKE tries its best to
always keep enough ACKs queued to ensure forward progress in the TCP flow
being filtered. It does this by only filtering redundant ACKs. In its
default 'conservative' mode, the filter will always keep at least two
redundant ACKs in the queue, while in 'aggressive' mode, it will filter
down to a single ACK.
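
With the matching iproute2 support, the two modes are expected to be
selectable roughly like this (option names are illustrative and depend on
the tc version in use):

    # conservative mode: keep at least two redundant ACKs queued
    tc qdisc replace dev eth0 root cake bandwidth 1Mbit ack-filter

    # aggressive mode: filter down to a single ACK
    tc qdisc replace dev eth0 root cake bandwidth 1Mbit ack-filter-aggressive
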
The ACK filter works by inspecting the per-flow queue on every packet
enqueue. Starting at the head of the queue, the filter looks for another
eligible packet to drop (so the ACK being dropped is always closer to the
head of the queue than the packet being enqueued). An ACK is eligible only
if it ACKs *fewer* bytes than the new packet being enqueued, including any
SACK options. This prevents duplicate ACKs from being filtered, to avoid
interfering with retransmission logic. In addition, we check TCP header
options, and only drop ACKs whose options are known not to interfere with
sender state. In particular, packets with unknown option codes are never
dropped.
In aggressive mode, an eligible packet is always dropped, while in
conservative mode, at least two ACKs are kept in the queue. Only pure ACKs
(with no data segments) are considered eligible for dropping, but when an
ACK with data segments is enqueued, this can cause another pure ACK to
become eligible for dropping.
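
To make the cumulative-ACK part of the eligibility check concrete, here is
a small userspace sketch (not part of the patch; seq_after() mimics the
wrap-safe after() comparison the kernel uses):

    #include <stdint.h>
    #include <stdio.h>

    /* wrap-safe: true if sequence number a is later than b */
    static int seq_after(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) > 0;
    }

    int main(void)
    {
            uint32_t queued_ack = 0xfffffff0u; /* ACK already in the queue */
            uint32_t new_ack = 0x00000010u;    /* ACK being enqueued, post-wrap */

            /* the queued ACK is only redundant (and thus eligible for
             * filtering) if the new packet ACKs strictly more data
             */
            if (seq_after(new_ack, queued_ack))
                    printf("queued ACK is eligible for filtering\n");
            return 0;
    }
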
The approach described above ensures that this ACK filter avoids most of
the drawbacks of a naive filtering mechanism that only keeps flow state but
does not inspect the queue. This is the rationale for including the ACK
filter in CAKE itself rather than as a separate module (such as a TC
filter, for instance).

Our performance evaluation has shown that on a 30/1 Mbps link with a
bidirectional traffic test (RRUL), turning on the ACK filter on the
upstream link improves downstream throughput by ~20% (both modes) and
upstream throughput by ~12% in conservative mode and ~40% in aggressive
mode, at the cost of ~5ms of inter-flow latency due to the increased
congestion.

In *really* pathological cases, the effect can be a lot more pronounced; for
instance, the ACK filter increases the achievable downstream throughput on a
link with 100 Kbps in the upstream direction by an order of magnitude (from
~2.5 Mbps to ~25 Mbps).

Finally, even though we consider the ACK filter to be safer than most, we
do not recommend turning it on everywhere: on more symmetrical link
bandwidths the effect is negligible at best.

Cc: Yuchung Cheng <ycheng@...gle.com>
Cc: Neal Cardwell <ncardwell@...gle.com>
Signed-off-by: Toke Høiland-Jørgensen <toke@...e.dk>
---
net/sched/sch_cake.c | 453 ++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 451 insertions(+), 2 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 10e208e4255d..68ac908470f1 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -757,6 +757,432 @@ static void flow_queue_add(struct cake_flow *flow, struct sk_buff *skb)
skb->next = NULL;
}

+static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
+ struct ipv6hdr *buf)
+{
+ unsigned int offset = skb_network_offset(skb);
+ struct iphdr *iph;
+
+ iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
+
+ if (!iph)
+ return NULL;
+
+ if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
+ return skb_header_pointer(skb, offset + iph->ihl * 4,
+ sizeof(struct ipv6hdr), buf);
+
+ else if (iph->version == 4)
+ return iph;
+
+ else if (iph->version == 6)
+ return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
+ buf);
+
+ return NULL;
+}
+
+static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
+ void *buf, unsigned int bufsize)
+{
+ unsigned int offset = skb_network_offset(skb);
+ const struct ipv6hdr *ipv6h;
+ const struct tcphdr *tcph;
+ const struct iphdr *iph;
+ struct ipv6hdr _ipv6h;
+ struct tcphdr _tcph;
+
+ ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
+
+ if (!ipv6h)
+ return NULL;
+
+ if (ipv6h->version == 4) {
+ iph = (struct iphdr *)ipv6h;
+ offset += iph->ihl * 4;
+
+ /* special-case 6in4 tunnelling, as that is a common way to get
+ * v6 connectivity in the home
+ */
+ if (iph->protocol == IPPROTO_IPV6) {
+ ipv6h = skb_header_pointer(skb, offset,
+ sizeof(_ipv6h), &_ipv6h);
+
+ if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
+ return NULL;
+
+ offset += sizeof(struct ipv6hdr);
+
+ } else if (iph->protocol != IPPROTO_TCP) {
+ return NULL;
+ }
+
+ } else if (ipv6h->version == 6) {
+ if (ipv6h->nexthdr != IPPROTO_TCP)
+ return NULL;
+
+ offset += sizeof(struct ipv6hdr);
+ } else {
+ return NULL;
+ }
+
+ tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+ if (!tcph)
+ return NULL;
+
+ return skb_header_pointer(skb, offset,
+ min(__tcp_hdrlen(tcph), bufsize), buf);
+}
+
+static const void *cake_get_tcpopt(const struct tcphdr *tcph,
+ int code, int *oplen)
+{
+ /* inspired by tcp_parse_options in tcp_input.c */
+ int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
+ const u8 *ptr = (const u8 *)(tcph + 1);
+
+ while (length > 0) {
+ int opcode = *ptr++;
+ int opsize;
+
+ if (opcode == TCPOPT_EOL)
+ break;
+ if (opcode == TCPOPT_NOP) {
+ length--;
+ continue;
+ }
+ opsize = *ptr++;
+ if (opsize < 2 || opsize > length)
+ break;
+
+ if (opcode == code) {
+ *oplen = opsize;
+ return ptr;
+ }
+
+ ptr += opsize - 2;
+ length -= opsize;
+ }
+
+ return NULL;
+}
+
+/* Compare two SACK sequences. A sequence is considered greater if it SACKs more
+ * bytes than the other. In the case where both sequences ACK bytes that the
+ * other doesn't, A is considered greater. DSACKs in A also make A be
+ * considered greater.
+ *
+ * @return -1, 0 or 1 as normal compare functions
+ */
+static int cake_tcph_sack_compare(const struct tcphdr *tcph_a,
+ const struct tcphdr *tcph_b)
+{
+ const struct tcp_sack_block_wire *sack_a, *sack_b;
+ u32 ack_seq_a = ntohl(tcph_a->ack_seq);
+ u32 bytes_a = 0, bytes_b = 0;
+ int oplen_a, oplen_b;
+ bool first = true;
+
+ sack_a = cake_get_tcpopt(tcph_a, TCPOPT_SACK, &oplen_a);
+ sack_b = cake_get_tcpopt(tcph_b, TCPOPT_SACK, &oplen_b);
+
+ /* pointers point to option contents */
+ oplen_a -= TCPOLEN_SACK_BASE;
+ oplen_b -= TCPOLEN_SACK_BASE;
+
+ if (sack_a && oplen_a >= sizeof(*sack_a) &&
+ (!sack_b || oplen_b < sizeof(*sack_b)))
+ return -1;
+ else if (sack_b && oplen_b >= sizeof(*sack_b) &&
+ (!sack_a || oplen_a < sizeof(*sack_a)))
+ return 1;
+ else if ((!sack_a || oplen_a < sizeof(*sack_a)) &&
+ (!sack_b || oplen_b < sizeof(*sack_b)))
+ return 0;
+
+ while (oplen_a >= sizeof(*sack_a)) {
+ const struct tcp_sack_block_wire *sack_tmp = sack_b;
+ u32 start_a = get_unaligned_be32(&sack_a->start_seq);
+ u32 end_a = get_unaligned_be32(&sack_a->end_seq);
+ int oplen_tmp = oplen_b;
+ bool found = false;
+
+ /* DSACK; always considered greater to prevent dropping */
+ if (before(start_a, ack_seq_a))
+ return -1;
+
+ bytes_a += end_a - start_a;
+
+ while (oplen_tmp >= sizeof(*sack_tmp)) {
+ u32 start_b = get_unaligned_be32(&sack_tmp->start_seq);
+ u32 end_b = get_unaligned_be32(&sack_tmp->end_seq);
+
+ /* first time through we count the total size */
+ if (first)
+ bytes_b += end_b - start_b;
+
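+ /* true if b's block fully covers a's current block */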
+ if (!after(start_b, start_a) && !before(end_b, end_a)) {
+ found = true;
+ if (!first)
+ break;
+ }
+ oplen_tmp -= sizeof(*sack_tmp);
+ sack_tmp++;
+ }
+
+ if (!found)
+ return -1;
+
+ oplen_a -= sizeof(*sack_a);
+ sack_a++;
+ first = false;
+ }
+
+ /* If we made it this far, all ranges SACKed by A are covered by B, so
+ * either the SACKs are equal, or B SACKs more bytes.
+ */
+ return bytes_b > bytes_a ? 1 : 0;
+}
+
+static void cake_tcph_get_tstamp(const struct tcphdr *tcph,
+ u32 *tsval, u32 *tsecr)
+{
+ const u8 *ptr;
+ int opsize;
+
+ ptr = cake_get_tcpopt(tcph, TCPOPT_TIMESTAMP, &opsize);
+
+ if (ptr && opsize == TCPOLEN_TIMESTAMP) {
+ *tsval = get_unaligned_be32(ptr);
+ *tsecr = get_unaligned_be32(ptr + 4);
+ }
+}
+
+static bool cake_tcph_may_drop(const struct tcphdr *tcph,
+ u32 tstamp_new, u32 tsecr_new)
+{
+ /* inspired by tcp_parse_options in tcp_input.c */
+ int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
+ const u8 *ptr = (const u8 *)(tcph + 1);
+ u32 tstamp, tsecr;
+
+ /* 3 reserved flags must be unset to avoid future breakage
+ * ACK must be set
+ * ECE/CWR are handled separately
+ * All other flags URG/PSH/RST/SYN/FIN must be unset
+ * 0x0FFF0000 = all TCP flags (confirm ACK=1, others zero)
+ * 0x00C00000 = CWR/ECE (handled separately)
+ * 0x0F3F0000 = 0x0FFF0000 & ~0x00C00000
+ */
+ if (((tcp_flag_word(tcph) &
+ cpu_to_be32(0x0F3F0000)) != TCP_FLAG_ACK))
+ return false;
+
+ while (length > 0) {
+ int opcode = *ptr++;
+ int opsize;
+
+ if (opcode == TCPOPT_EOL)
+ break;
+ if (opcode == TCPOPT_NOP) {
+ length--;
+ continue;
+ }
+ opsize = *ptr++;
+ if (opsize < 2 || opsize > length)
+ break;
+
+ switch (opcode) {
+ case TCPOPT_MD5SIG: /* doesn't influence state */
+ break;
+
+ case TCPOPT_SACK: /* stricter checking performed later */
+ if (opsize % 8 != 2)
+ return false;
+ break;
+
+ case TCPOPT_TIMESTAMP:
+ /* only drop timestamps lower than new */
+ if (opsize != TCPOLEN_TIMESTAMP)
+ return false;
+ tstamp = get_unaligned_be32(ptr);
+ tsecr = get_unaligned_be32(ptr + 4);
+ if (after(tstamp, tstamp_new) ||
+ after(tsecr, tsecr_new))
+ return false;
+ break;
+
+ case TCPOPT_MSS: /* these should only be set on SYN */
+ case TCPOPT_WINDOW:
+ case TCPOPT_SACK_PERM:
+ case TCPOPT_FASTOPEN:
+ case TCPOPT_EXP:
+ default: /* don't drop if any unknown options are present */
+ return false;
+ }
+
+ ptr += opsize - 2;
+ length -= opsize;
+ }
+
+ return true;
+}
+
+static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
+ struct cake_flow *flow)
+{
+ bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE;
+ struct sk_buff *elig_ack = NULL, *elig_ack_prev = NULL;
+ struct sk_buff *skb_check, *skb_prev = NULL;
+ const struct ipv6hdr *ipv6h, *ipv6h_check;
+ unsigned char _tcph[64], _tcph_check[64];
+ const struct tcphdr *tcph, *tcph_check;
+ const struct iphdr *iph, *iph_check;
+ struct ipv6hdr _iph, _iph_check;
+ const struct sk_buff *skb;
+ int seglen, num_found = 0;
+ u32 tstamp = 0, tsecr = 0;
+ __be32 elig_flags = 0;
+ int sack_comp;
+
+ /* no other possible ACKs to filter */
+ if (flow->head == flow->tail)
+ return NULL;
+
+ skb = flow->tail;
+ tcph = cake_get_tcphdr(skb, _tcph, sizeof(_tcph));
+ iph = cake_get_iphdr(skb, &_iph);
+ if (!tcph)
+ return NULL;
+
+ cake_tcph_get_tstamp(tcph, &tstamp, &tsecr);
+
+ /* the 'triggering' packet need only have the ACK flag set.
+ * also check that SYN is not set, as there won't be any previous ACKs.
+ */
+ if ((tcp_flag_word(tcph) &
+ (TCP_FLAG_ACK | TCP_FLAG_SYN)) != TCP_FLAG_ACK)
+ return NULL;
+
+ /* the 'triggering' ACK is at the tail of the queue, we have already
+ * returned if it is the only packet in the flow. loop through the rest
+ * of the queue looking for pure ACKs with the same 5-tuple as the
+ * triggering one.
+ */
+ for (skb_check = flow->head;
+ skb_check && skb_check != skb;
+ skb_prev = skb_check, skb_check = skb_check->next) {
+ iph_check = cake_get_iphdr(skb_check, &_iph_check);
+ tcph_check = cake_get_tcphdr(skb_check, &_tcph_check,
+ sizeof(_tcph_check));
+
+ /* only TCP packets with matching 5-tuple are eligible, and only
+ * drop safe headers
+ */
+ if (!tcph_check || iph->version != iph_check->version ||
+ tcph_check->source != tcph->source ||
+ tcph_check->dest != tcph->dest)
+ continue;
+
+ if (iph_check->version == 4) {
+ if (iph_check->saddr != iph->saddr ||
+ iph_check->daddr != iph->daddr)
+ continue;
+
+ seglen = ntohs(iph_check->tot_len) -
+ (4 * iph_check->ihl);
+ } else if (iph_check->version == 6) {
+ ipv6h = (struct ipv6hdr *)iph;
+ ipv6h_check = (struct ipv6hdr *)iph_check;
+
+ if (ipv6_addr_cmp(&ipv6h_check->saddr, &ipv6h->saddr) ||
+ ipv6_addr_cmp(&ipv6h_check->daddr, &ipv6h->daddr))
+ continue;
+
+ seglen = ntohs(ipv6h_check->payload_len);
+ } else {
+ WARN_ON(1); /* shouldn't happen */
+ continue;
+ }
+
+ /* If the ECE/CWR flags changed from the previous eligible
+ * packet in the same flow, we should no longer be dropping that
+ * previous packet as this would lose information.
+ */
+ if (elig_ack && (tcp_flag_word(tcph_check) &
+ (TCP_FLAG_ECE | TCP_FLAG_CWR)) != elig_flags) {
+ elig_ack = NULL;
+ elig_ack_prev = NULL;
+ num_found--;
+ }
+
+ /* Check TCP options and flags, don't drop ACKs with segment
+ * data, and don't drop ACKs with a higher cumulative ACK
+ * counter than the triggering packet. Check ACK seqno here to
+ * avoid parsing SACK options of packets we are going to exclude
+ * anyway.
+ */
+ if (!cake_tcph_may_drop(tcph_check, tstamp, tsecr) ||
+ (seglen - __tcp_hdrlen(tcph_check)) != 0 ||
+ after(ntohl(tcph_check->ack_seq), ntohl(tcph->ack_seq)))
+ continue;
+
+ /* Check SACK options. The triggering packet must SACK more data
+ * than the ACK under consideration, or SACK the same range but
+ * have a larger cumulative ACK counter. The latter is a
+ * pathological case, but is contained in the following check
+ * anyway, just to be safe.
+ */
+ sack_comp = cake_tcph_sack_compare(tcph_check, tcph);
+
+ if (sack_comp < 0 ||
+ (ntohl(tcph_check->ack_seq) == ntohl(tcph->ack_seq) &&
+ sack_comp == 0))
+ continue;
+
+ /* At this point we have found an eligible pure ACK to drop; if
+ * we are in aggressive mode, we are done. Otherwise, keep
+ * searching unless this is the second eligible ACK we
+ * found.
+ *
+ * Since we want to drop ACK closest to the head of the queue,
+ * save the first eligible ACK we find, even if we need to loop
+ * again.
+ */
+ if (!elig_ack) {
+ elig_ack = skb_check;
+ elig_ack_prev = skb_prev;
+ elig_flags = (tcp_flag_word(tcph_check)
+ & (TCP_FLAG_ECE | TCP_FLAG_CWR));
+ }
+
+ if (num_found++ > 0)
+ goto found;
+ }
+
+ /* We made it through the queue without finding two eligible ACKs. If
+ * we found a single eligible ACK we can drop it in aggressive mode if
+ * we can guarantee that this does not interfere with ECN flag
+ * information. We ensure this by dropping it only if the enqueued
+ * packet is consecutive with the eligible ACK, and their flags match.
+ */
+ if (elig_ack && aggressive && elig_ack->next == skb &&
+ (elig_flags == (tcp_flag_word(tcph) & (TCP_FLAG_ECE | TCP_FLAG_CWR))))
+ goto found;
+
+ return NULL;
+
+found:
+ if (elig_ack_prev)
+ elig_ack_prev->next = elig_ack->next;
+ else
+ flow->head = elig_ack->next;
+
+ elig_ack->next = NULL;
+
+ return elig_ack;
+}
+
static u64 cake_ewma(u64 avg, u64 sample, u32 shift)
{
avg -= avg >> shift;
@@ -934,6 +1360,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct cake_sched_data *q = qdisc_priv(sch);
int len = qdisc_pkt_len(skb);
int uninitialized_var(ret);
+ struct sk_buff *ack = NULL;
ktime_t now = ktime_get();
struct cake_tin_data *b;
struct cake_flow *flow;
@@ -980,8 +1407,24 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
cobalt_set_enqueue_time(skb, now);
flow_queue_add(flow, skb);

- sch->q.qlen++;
- q->buffer_used += skb->truesize;
+ if (q->ack_filter)
+ ack = cake_ack_filter(q, flow);
+
+ if (ack) {
+ b->ack_drops++;
+ sch->qstats.drops++;
+ b->bytes += qdisc_pkt_len(ack);
+ len -= qdisc_pkt_len(ack);
+ q->buffer_used += skb->truesize - ack->truesize;
+ if (q->rate_flags & CAKE_FLAG_INGRESS)
+ cake_advance_shaper(q, b, ack, now, true);
+
+ qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+ consume_skb(ack);
+ } else {
+ sch->q.qlen++;
+ q->buffer_used += skb->truesize;
+ }

/* stats */
b->packets++;
@@ -1511,6 +1954,9 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
q->rate_flags &= ~CAKE_FLAG_INGRESS;
}

+ if (tb[TCA_CAKE_ACK_FILTER])
+ q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]);
+
if (tb[TCA_CAKE_MEMORY])
q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
@@ -1642,6 +2088,9 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
!!(q->rate_flags & CAKE_FLAG_INGRESS)))
goto nla_put_failure;

+ if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
+ goto nla_put_failure;
+
return nla_nest_end(skb, opts);

nla_put_failure: