Message-ID: <152527387324.14936.10258520847821060114.stgit@alrua-kau>
Date:   Wed, 02 May 2018 17:11:13 +0200
From:   Toke Høiland-Jørgensen <toke@...e.dk>
To:     netdev@...r.kernel.org
Cc:     cake@...ts.bufferbloat.net
Subject: [PATCH net-next v7 3/7] sch_cake: Add optional ACK filter

The ACK filter is an optional feature of CAKE which is designed to improve
performance on links with very asymmetrical rate limits. On such links
(which are unfortunately quite prevalent, especially for DSL and cable
subscribers), the downstream throughput can be limited by the rate at which
ACKs can be transmitted in the *upstream* direction.
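
As a rough illustration (assuming delayed ACKs covering two ~1500-byte
segments each, ~40-byte pure ACKs without TCP options, and no link-layer
overhead), every 40 bytes of upstream ACK traffic clocks out about 3000
bytes of downstream data, so the downstream direction needs roughly 1.3% of
its rate available upstream just for ACKs, before any upstream data traffic
competes for that capacity.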

Filtering ACKs can, in general, have adverse effects on TCP performance
because it interferes with ACK clocking (especially in slow start), and it
reduces the flow's resiliency to ACKs being dropped further along the path.
To alleviate these drawbacks, the ACK filter in CAKE tries its best to
always keep enough ACKs queued to ensure forward progress in the TCP flow
being filtered. It does this by only filtering redundant ACKs. In its
default 'conservative' mode, the filter will always keep at least two
redundant ACKs in the queue, while in 'aggressive' mode, it will filter
down to a single ACK.
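
For reference, the mode selection maps onto the qdisc's ack_filter field
roughly as sketched below (only CAKE_ACK_AGGRESSIVE and TCA_CAKE_ACK_FILTER
appear in this patch; the remaining names and values are assumptions for
illustration):

	/* illustrative sketch, not part of the patch */
	enum {
		CAKE_ACK_NONE = 0,	/* ACK filter disabled */
		CAKE_ACK_FILTER = 1,	/* 'conservative' mode */
		CAKE_ACK_AGGRESSIVE = 2	/* 'aggressive' mode */
	};

	/* q->ack_filter is set from the TCA_CAKE_ACK_FILTER netlink attribute
	 * in cake_change(); any non-zero value enables filtering at enqueue
	 * time, and CAKE_ACK_AGGRESSIVE selects the aggressive variant.
	 */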

The ACK filter works by inspecting the per-flow queue on every packet
enqueue. Starting at the head of the queue, the filter looks for another
eligible packet to drop (so the ACK being dropped is always closer to the
head of the queue than the packet being enqueued). An ACK is eligible only
if it ACKs *fewer* cumulative bytes than the new packet being enqueued.
This prevents duplicate ACKs from being filtered (unless SACK options are
also present), to avoid interfering with retransmission logic. In
aggressive mode, an eligible packet is always dropped, while in
conservative mode, at least two ACKs are kept in the queue. Only pure ACKs
(with no data segments) are considered eligible for dropping, but when an
ACK with data segments is enqueued, this can cause another pure ACK to
become eligible for dropping.
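
For clarity, the sequence-number part of that eligibility test amounts to a
wraparound-safe comparison along the lines of the sketch below (this helper
is illustrative only; the patch open-codes the check inside
cake_ack_filter()):

	/* illustrative sketch, not part of the patch: may an ACK that
	 * acknowledges candidate_seq be dropped when an ACK acknowledging
	 * trigger_seq is enqueued?  Values are in host byte order; the
	 * signed subtraction keeps the comparison correct across 32-bit
	 * sequence number wraparound.  The equal case is only acted on when
	 * the triggering ACK carries SACK options.
	 */
	static bool ack_seq_redundant(u32 trigger_seq, u32 candidate_seq)
	{
		return (s32)(trigger_seq - candidate_seq) >= 0;
	}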

The approach described above ensures that this ACK filter avoids most of
the drawbacks of a naive filtering mechanism that only keeps flow state but
does not inspect the queue. This is the rationale for including the ACK
filter in CAKE itself rather than as a separate module (as a TC filter, for
instance).

Our performance evaluation has shown that on a 30/1 Mbps link with a
bidirectional traffic test (RRUL), turning on the ACK filter on the
upstream link improves downstream throughput by ~20% (both modes) and
upstream throughput by ~12% in conservative mode and ~40% in aggressive
mode, at the cost of ~5ms of inter-flow latency due to the increased
congestion.

In *really* pathological cases, the effect can be much larger; for instance,
the ACK filter increases the achievable downstream throughput on a link
with 100 Kbps in the upstream direction by an order of magnitude (from ~2.5
Mbps to ~25 Mbps).

Finally, even though we consider the ACK filter to be safer than most, we
do not recommend turning it on everywhere: on links with more symmetrical
bandwidth the effect is negligible at best.

Signed-off-by: Toke Høiland-Jørgensen <toke@...e.dk>
---
 net/sched/sch_cake.c |  354 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 352 insertions(+), 2 deletions(-)

diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index a1dacc20c2b2..a412db9b647e 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -733,6 +733,336 @@ flow_queue_add(struct cake_flow *flow, struct sk_buff *skb)
 	skb->next = NULL;
 }
 
+static inline struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
+					   struct ipv6hdr *buf)
+{
+	unsigned int offset = skb_network_offset(skb);
+	struct iphdr *iph;
+
+	iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
+
+	if (!iph)
+		return NULL;
+
+	if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
+		return skb_header_pointer(skb, offset + iph->ihl * 4,
+					  sizeof(struct ipv6hdr), buf);
+
+	else if (iph->version == 4)
+		return iph;
+
+	else if (iph->version == 6)
+		return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
+					  buf);
+
+	return NULL;
+}
+
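+/* Locate the TCP header of @skb, copying it into @buf if the packet data is
+ * not linear.  Handles plain IPv4, plain IPv6 and IPv6-in-IPv4 (6in4)
+ * encapsulation; returns NULL if the packet is not TCP or the headers cannot
+ * be read.
+ */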
+static inline struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
+					     void *buf, unsigned int bufsize)
+{
+	unsigned int offset = skb_network_offset(skb);
+	const struct ipv6hdr *ipv6h;
+	const struct tcphdr *tcph;
+	const struct iphdr *iph;
+	struct ipv6hdr _ipv6h;
+	struct tcphdr _tcph;
+
+	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
+
+	if (!ipv6h)
+		return NULL;
+
+	if (ipv6h->version == 4) {
+		iph = (struct iphdr *)ipv6h;
+		offset += iph->ihl * 4;
+
+		/* special-case 6in4 tunnelling, as that is a common way to get
+		 * v6 connectivity in the home
+		 */
+		if (iph->protocol == IPPROTO_IPV6) {
+			ipv6h = skb_header_pointer(skb, offset,
+						   sizeof(_ipv6h), &_ipv6h);
+
+			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
+				return NULL;
+
+			offset += sizeof(struct ipv6hdr);
+
+		} else if (iph->protocol != IPPROTO_TCP) {
+			return NULL;
+		}
+
+	} else if (ipv6h->version == 6) {
+		if (ipv6h->nexthdr != IPPROTO_TCP)
+			return NULL;
+
+		offset += sizeof(struct ipv6hdr);
+	} else {
+		return NULL;
+	}
+
+	tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+	if (!tcph)
+		return NULL;
+
+	return skb_header_pointer(skb, offset,
+				  min(__tcp_hdrlen(tcph), bufsize), buf);
+}
+
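+/* Search @flow's queue for a redundant pure ACK belonging to the same TCP
+ * connection as the just-enqueued packet at the tail.  Returns the skb to
+ * drop, already unlinked from the queue, or NULL if nothing was filtered;
+ * flow->ackcheck records where to resume scanning on the next enqueue.
+ */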
+static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
+				       struct cake_flow *flow)
+{
+	bool thisconn_redundant_seen = false, thisconn_seen_last = false;
+	bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE;
+	bool otherconn_ack_seen = false;
+	struct sk_buff *skb_check, *skb_check_prev;
+	struct sk_buff *otherconn_checked_to = NULL;
+	struct sk_buff *thisconn_checked_to = NULL;
+	struct sk_buff *thisconn_ack = NULL;
+	const struct ipv6hdr *ipv6h, *ipv6h_check;
+	const struct tcphdr *tcph, *tcph_check;
+	const struct iphdr *iph, *iph_check;
+	const struct sk_buff *skb;
+	struct ipv6hdr _iph, _iph_check;
+	struct tcphdr _tcph_check;
+	unsigned char _tcph[64]; /* need to hold maximum hdr size */
+	int seglen;
+
+	/* no other possible ACKs to filter */
+	if (flow->head == flow->tail)
+		return NULL;
+
+	skb = flow->tail;
+	tcph = cake_get_tcphdr(skb, _tcph, sizeof(_tcph));
+	iph = cake_get_iphdr(skb, &_iph);
+	if (!tcph)
+		return NULL;
+
+	/* the 'triggering' packet need only have the ACK flag set.
+	 * also check that SYN is not set, as there won't be any previous ACKs.
+	 */
+	if ((tcp_flag_word(tcph) &
+	     (TCP_FLAG_ACK | TCP_FLAG_SYN)) != TCP_FLAG_ACK)
+		return NULL;
+
+	/* the 'triggering' ACK is at the end of the queue,
+	 * we have already returned if it is the only packet in the flow.
+	 * stop before last packet in queue, don't compare trigger ACK to itself
+	 * start where we finished last time if recorded in ->ackcheck
+	 * otherwise start from the head of the flow queue.
+	 */
+	skb_check_prev = flow->ackcheck;
+	skb_check = flow->ackcheck ?: flow->head;
+
+	while (skb_check->next) {
+		bool pure_ack, thisconn;
+
+		/* don't increment if at head of flow queue (_prev == NULL) */
+		if (skb_check_prev) {
+			skb_check_prev = skb_check;
+			skb_check = skb_check->next;
+			if (!skb_check->next)
+				break;
+		} else {
+			skb_check_prev = ERR_PTR(-1);
+		}
+
+		iph_check = cake_get_iphdr(skb_check, &_iph_check);
+		tcph_check = cake_get_tcphdr(skb_check, &_tcph_check,
+					     sizeof(_tcph_check));
+
+		if (!tcph_check || iph->version != iph_check->version)
+			continue;
+
+		if (iph->version == 4) {
+			seglen = ntohs(iph_check->tot_len) -
+				       (4 * iph_check->ihl);
+
+			thisconn = (iph_check->saddr == iph->saddr &&
+				    iph_check->daddr == iph->daddr);
+		} else if (iph->version == 6) {
+			ipv6h = (struct ipv6hdr *)iph;
+			ipv6h_check = (struct ipv6hdr *)iph_check;
+			seglen = ntohs(ipv6h_check->payload_len);
+
+			thisconn = (!ipv6_addr_cmp(&ipv6h_check->saddr,
+						   &ipv6h->saddr) &&
+				    !ipv6_addr_cmp(&ipv6h_check->daddr,
+						   &ipv6h->daddr));
+		} else {
+			WARN_ON(1);  /* shouldn't happen */
+			continue;
+		}
+
+		/* stricter criteria apply to ACKs that we may filter
+		 * 3 reserved flags must be unset to avoid future breakage
+		 * ECE/CWR/NS can be safely ignored
+		 * ACK must be set
+		 * All other flags URG/PSH/RST/SYN/FIN must be unset
+		 * 0x0FFF0000 = all TCP flags (confirm ACK=1, others zero)
+		 * 0x01C00000 = NS/CWR/ECE (safe to ignore)
+		 * 0x0E3F0000 = 0x0FFF0000 & ~0x01C00000
+		 * must be 'pure' ACK, contain zero bytes of segment data
+		 * options are ignored
+		 */
+		if ((tcp_flag_word(tcph_check) &
+		     (TCP_FLAG_ACK | TCP_FLAG_SYN)) != TCP_FLAG_ACK)
+			continue;
+
+		else if (((tcp_flag_word(tcph_check) &
+			   cpu_to_be32(0x0E3F0000)) != TCP_FLAG_ACK) ||
+			 ((seglen - __tcp_hdrlen(tcph_check)) != 0))
+			pure_ack = false;
+
+		else
+			pure_ack = true;
+
+		/* if we find an ACK belonging to a different connection,
+		 * continue checking for other ACKs this round, but restart
+		 * checking from the other connection next time.
+		 */
+		if (thisconn &&	(tcph_check->source != tcph->source ||
+				 tcph_check->dest != tcph->dest))
+			thisconn = false;
+
+		/* the new (triggering) ACK must not acknowledge fewer bytes
+		 * than the ACK being considered for dropping
+		 */
+		if (thisconn &&
+		    ((int32_t)(ntohl(tcph_check->ack_seq) -
+			       ntohl(tcph->ack_seq)) > 0))
+			continue;
+
+		/* DupACKs with an equal sequence number shouldn't be filtered,
+		 * but we can filter if the triggering packet is a SACK
+		 */
+		if (thisconn &&
+		    (ntohl(tcph_check->ack_seq) == ntohl(tcph->ack_seq))) {
+			/* inspired by tcp_parse_options in tcp_input.c */
+			bool sack = false;
+			int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
+			const u8 *ptr = (const u8 *)(tcph + 1);
+
+			while (length > 0) {
+				int opcode = *ptr++;
+				int opsize;
+
+				if (opcode == TCPOPT_EOL)
+					break;
+				if (opcode == TCPOPT_NOP) {
+					length--;
+					continue;
+				}
+				opsize = *ptr++;
+				if (opsize < 2 || opsize > length)
+					break;
+				if (opcode == TCPOPT_SACK) {
+					sack = true;
+					break;
+				}
+				ptr += opsize - 2;
+				length -= opsize;
+			}
+			if (!sack)
+				continue;
+		}
+
+		/* somewhat complicated control flow for 'conservative'
+		 * ACK filtering that aims to be more polite to slow-start and
+		 * in the presence of packet loss.
+		 * does not filter if there is one 'redundant' ACK in the queue.
+		 * 'data' ACKs won't be filtered but do count as redundant ACKs.
+		 */
+		if (thisconn) {
+			thisconn_seen_last = true;
+			/* if aggressive and this is a data ack we can skip
+			 * checking it next time.
+			 */
+			thisconn_checked_to = (aggressive && !pure_ack) ?
+				skb_check : skb_check_prev;
+			/* the first pure ack for this connection.
+			 * record where it is, but only break if aggressive
+			 * or already seen data ack from the same connection
+			 */
+			if (pure_ack && !thisconn_ack) {
+				thisconn_ack = skb_check_prev;
+				if (aggressive || thisconn_redundant_seen)
+					break;
+			/* data ack or subsequent pure ack */
+			} else {
+				thisconn_redundant_seen = true;
+				/* this is the second ack for this connection
+				 * break to filter the first pure ack
+				 */
+				if (thisconn_ack)
+					break;
+			}
+		/* track packets from non-matching tcp connections that will
+		 * need evaluation on the next run.
+		 * if there are packets from both the matching connection and
+		 * others that require checking next run, track which was updated
+		 * last and return the older of the two to ensure full coverage.
+		 * if a non-matching pure ack has been seen, cannot skip any
+		 * further on the next run so don't update.
+		 */
+		} else if (!otherconn_ack_seen) {
+			thisconn_seen_last = false;
+			if (pure_ack) {
+				otherconn_ack_seen = true;
+				/* if aggressive we don't care about old data,
+				 * start from the pure ack.
+				 * otherwise if there is a previous data ack,
+				 * start checking from it next time.
+				 */
+				if (aggressive || !otherconn_checked_to)
+					otherconn_checked_to = skb_check_prev;
+			} else {
+				otherconn_checked_to = aggressive ?
+					skb_check : skb_check_prev;
+			}
+		}
+	}
+
+	/* skb_check is reused at this point
+	 * it is the pure ACK to be filtered (if any)
+	 */
+	skb_check = NULL;
+
+	/* next time, start checking from whichever of the recorded restart
+	 * points (this connection's and other connections') is nearer to the
+	 * head of the queue. if neither was recorded, start after the last
+	 * packet evaluated in the loop.
+	 */
+	if (thisconn_checked_to && otherconn_checked_to)
+		flow->ackcheck = thisconn_seen_last ?
+			otherconn_checked_to : thisconn_checked_to;
+	else if (thisconn_checked_to)
+		flow->ackcheck = thisconn_checked_to;
+	else if (otherconn_checked_to)
+		flow->ackcheck = otherconn_checked_to;
+	else
+		flow->ackcheck = skb_check_prev;
+
+	/* if filtering, remove the pure ACK from the flow queue */
+	if (thisconn_ack && (aggressive || thisconn_redundant_seen)) {
+		if (PTR_ERR(thisconn_ack) == -1) {
+			skb_check = flow->head;
+			flow->head = flow->head->next;
+		} else {
+			skb_check = thisconn_ack->next;
+			thisconn_ack->next = thisconn_ack->next->next;
+		}
+	}
+
+	/* we just filtered that ack, fix up the list */
+	if (flow->ackcheck == skb_check)
+		flow->ackcheck = thisconn_ack;
+	/* check the entire flow queue next time */
+	if (PTR_ERR(flow->ackcheck) == -1)
+		flow->ackcheck = NULL;
+
+	return skb_check;
+}
+
 static inline cobalt_time_t cake_ewma(cobalt_time_t avg, cobalt_time_t sample,
 				      u32 shift)
 {
@@ -909,6 +1239,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	/* signed len to handle corner case filtered ACK larger than trigger */
 	int len = qdisc_pkt_len(skb);
 	u64 now = cobalt_get_time();
+	struct sk_buff *ack = NULL;
 
 	tin = 0;
 	b = &q->tins[tin];
@@ -942,8 +1273,24 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	cobalt_set_enqueue_time(skb, now);
 	flow_queue_add(flow, skb);
 
-	sch->q.qlen++;
-	q->buffer_used      += skb->truesize;
+	if (q->ack_filter)
+		ack = cake_ack_filter(q, flow);
+
+	if (ack) {
+		b->ack_drops++;
+		sch->qstats.drops++;
+		b->bytes += qdisc_pkt_len(ack);
+		len -= qdisc_pkt_len(ack);
+		q->buffer_used += skb->truesize - ack->truesize;
+		if (q->rate_flags & CAKE_FLAG_INGRESS)
+			cake_advance_shaper(q, b, ack, now, true);
+
+		qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+		consume_skb(ack);
+	} else {
+		sch->q.qlen++;
+		q->buffer_used      += skb->truesize;
+	}
 
 	/* stats */
 	b->packets++;
@@ -1456,6 +1803,9 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
 			q->rate_flags &= ~CAKE_FLAG_INGRESS;
 	}
 
+	if (tb[TCA_CAKE_ACK_FILTER])
+		q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]);
+
 	if (tb[TCA_CAKE_MEMORY])
 		q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
 
