lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Sun, 21 Mar 2021 18:01:12 +0100
From:   Paolo Abeni <pabeni@...hat.com>
To:     netdev@...r.kernel.org
Cc:     "David S. Miller" <davem@...emloft.net>,
        Jakub Kicinski <kuba@...nel.org>,
        Steffen Klassert <steffen.klassert@...unet.com>,
        Willem de Bruijn <willemb@...gle.com>,
        Alexander Lobakin <alobakin@...me>
Subject: [PATCH net-next 1/8] udp: fixup csum for GSO receive slow path

When looping back UDP GSO over UDP tunnel packets to a UDP socket,
the individual packet csum is currently set to CHECKSUM_NONE. That causes
unexpected/wrong csum validation errors later in the UDP receive path.

We could possibly address the issue with some additional checks and
csum mangling in the UDP tunnel code. Since the issue affects only
this UDP receive slow path, let's set a suitable csum status there.

Signed-off-by: Paolo Abeni <pabeni@...hat.com>
---
 include/net/udp.h | 18 ++++++++++++++++++
 net/ipv4/udp.c    | 10 ++++++++++
 net/ipv6/udp.c    |  5 +++++
 3 files changed, 33 insertions(+)

diff --git a/include/net/udp.h b/include/net/udp.h
index d4d064c592328..007683eb3e113 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -515,6 +515,24 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
 	return segs;
 }
 
+static inline void udp_post_segment_fix_csum(struct sk_buff *skb, int level)
+{
+	/* UDP-lite can't land here - no GRO */
+	WARN_ON_ONCE(UDP_SKB_CB(skb)->partial_cov);
+
+	/* GRO already validated the csum up to 'level', and we just
+	 * consumed one header, update the skb accordingly
+	 */
+	UDP_SKB_CB(skb)->cscov = skb->len;
+	if (level) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		skb->csum_level = 0;
+	} else {
+		skb->ip_summed = CHECKSUM_NONE;
+		skb->csum_valid = 1;
+	}
+}
+
 #ifdef CONFIG_BPF_SYSCALL
 struct sk_psock;
 struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4a0478b17243a..ff54135c51ffa 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2168,6 +2168,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
 static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct sk_buff *next, *segs;
+	int csum_level;
 	int ret;
 
 	if (likely(!udp_unexpected_gso(sk, skb)))
@@ -2175,9 +2176,18 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
 	BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_GSO_CB_OFFSET);
 	__skb_push(skb, -skb_mac_offset(skb));
+	csum_level = !!(skb_shinfo(skb)->gso_type &
+			(SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM));
 	segs = udp_rcv_segment(sk, skb, true);
 	skb_list_walk_safe(segs, skb, next) {
 		__skb_pull(skb, skb_transport_offset(skb));
+
+		/* UDP GSO packets looped back after adding UDP encap land here with CHECKSUM none,
+		 * instead of adding another check in the tunnel fastpath, we can force valid
+		 * csums here (packets are locally generated).
+		 * Additionally fixup the UDP CB
+		 */
+		udp_post_segment_fix_csum(skb, csum_level);
 		ret = udp_queue_rcv_one_skb(sk, skb);
 		if (ret > 0)
 			ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d25e5a9252fdb..e7d4bf3a65c72 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -739,16 +739,21 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
 static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct sk_buff *next, *segs;
+	int csum_level;
 	int ret;
 
 	if (likely(!udp_unexpected_gso(sk, skb)))
 		return udpv6_queue_rcv_one_skb(sk, skb);
 
 	__skb_push(skb, -skb_mac_offset(skb));
+	csum_level = !!(skb_shinfo(skb)->gso_type &
+			(SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM));
 	segs = udp_rcv_segment(sk, skb, false);
 	skb_list_walk_safe(segs, skb, next) {
 		__skb_pull(skb, skb_transport_offset(skb));
 
+		/* see comments in udp_queue_rcv_skb() */
+		udp_post_segment_fix_csum(skb, csum_level);
 		ret = udpv6_queue_rcv_one_skb(sk, skb);
 		if (ret > 0)
 			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
-- 
2.26.2

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ