Message-ID: <20230209013329.87879-2-kuniyu@amazon.com>
Date:   Wed, 8 Feb 2023 17:33:28 -0800
From:   Kuniyuki Iwashima <kuniyu@...zon.com>
To:     "David S. Miller" <davem@...emloft.net>,
        Eric Dumazet <edumazet@...gle.com>,
        Jakub Kicinski <kuba@...nel.org>,
        Paolo Abeni <pabeni@...hat.com>
CC:     Kuniyuki Iwashima <kuniyu@...zon.com>,
        Kuniyuki Iwashima <kuni1840@...il.com>,
        <netdev@...r.kernel.org>, Andrii <tulup@...l.ru>,
        Arnaldo Carvalho de Melo <acme@...driva.com>
Subject: [PATCH v2 net 1/2] dccp/tcp: Avoid negative sk_forward_alloc by ipv6_pinfo.pktoptions.

Eric Dumazet pointed out [0] that when we call skb_set_owner_r()
for ipv6_pinfo.pktoptions, sk_rmem_schedule() has not been called
beforehand, so charging the skb drives sk_forward_alloc negative.

Note that in (dccp|tcp)_v6_do_rcv(), we call sk_rmem_schedule()
just after skb_clone() instead of after ipv6_opt_accepted().  This is
because tcp_send_synack() can make sk_forward_alloc negative before
ipv6_opt_accepted() in the crossed SYN-ACK or self-connect() cases.

[0]: https://lore.kernel.org/netdev/CANn89iK9oc20Jdi_41jb9URdF210r7d1Y-+uypbMSbOfY6jqrg@mail.gmail.com/

Fixes: 323fbd0edf3f ("net: dccp: Add handling of IPV6_PKTOPTIONS to dccp_v6_do_rcv()")
Fixes: 3df80d9320bc ("[DCCP]: Introduce DCCPv6")
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Kuniyuki Iwashima <kuniyu@...zon.com>
---
Cc: Andrii <tulup@...l.ru>
Cc: Arnaldo Carvalho de Melo <acme@...driva.com>
---
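For background, here is a rough, simplified sketch of the pattern the
diff applies at each call site: charge the clone's truesize before
handing it to the socket, and drop the clone if the charge fails.  The
wrapper function below is hypothetical and only for illustration;
skb_clone(), sk_rmem_schedule(), skb_set_owner_r() and __kfree_skb()
are the real helpers.

#include <linux/skbuff.h>
#include <net/sock.h>

/* Hedged sketch (hypothetical helper, not kernel source): call
 * sk_rmem_schedule() *before* skb_set_owner_r(), which otherwise
 * subtracts the clone's truesize from sk_forward_alloc without a
 * prior charge and drives it negative.
 */
static struct sk_buff *clone_and_charge(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return NULL;

	if (!sk_rmem_schedule(sk, clone, clone->truesize)) {
		/* Charge failed: free the clone rather than going negative. */
		__kfree_skb(clone);
		return NULL;
	}

	/* Safe now: skb_set_owner_r() adds truesize to sk_rmem_alloc and
	 * decrements sk_forward_alloc, which was just topped up above.
	 */
	skb_set_owner_r(clone, sk);
	return clone;
}
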
 net/dccp/ipv6.c     | 23 +++++++++++++++++++----
 net/ipv6/tcp_ipv6.c | 22 ++++++++++++++++++----
 2 files changed, 37 insertions(+), 8 deletions(-)

diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 4260fe466993..2687e7ef5b5d 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -554,8 +554,15 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
 		newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
 		consume_skb(ireq->pktopts);
 		ireq->pktopts = NULL;
-		if (newnp->pktoptions)
-			skb_set_owner_r(newnp->pktoptions, newsk);
+		if (newnp->pktoptions) {
+			if (sk_rmem_schedule(newsk, newnp->pktoptions,
+					     newnp->pktoptions->truesize)) {
+				skb_set_owner_r(newnp->pktoptions, newsk);
+			} else {
+				__kfree_skb(newnp->pktoptions);
+				newnp->pktoptions = NULL;
+			}
+		}
 	}
 
 	return newsk;
@@ -614,8 +621,17 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	   by tcp. Feel free to propose better solution.
 					       --ANK (980728)
 	 */
-	if (np->rxopt.all)
+	if (np->rxopt.all) {
 		opt_skb = skb_clone(skb, GFP_ATOMIC);
+		if (opt_skb) {
+			if (sk_rmem_schedule(sk, opt_skb, opt_skb->truesize)) {
+				skb_set_owner_r(opt_skb, sk);
+			} else {
+				__kfree_skb(opt_skb);
+				opt_skb = NULL;
+			}
+		}
+	}
 
 	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
 		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
@@ -679,7 +695,6 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
 		if (ipv6_opt_accepted(sk, opt_skb,
 				      &DCCP_SKB_CB(opt_skb)->header.h6)) {
-			skb_set_owner_r(opt_skb, sk);
 			memmove(IP6CB(opt_skb),
 				&DCCP_SKB_CB(opt_skb)->header.h6,
 				sizeof(struct inet6_skb_parm));
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 11b736a76bd7..95c1078aba5a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1392,8 +1392,14 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 			consume_skb(ireq->pktopts);
 			ireq->pktopts = NULL;
 			if (newnp->pktoptions) {
-				tcp_v6_restore_cb(newnp->pktoptions);
-				skb_set_owner_r(newnp->pktoptions, newsk);
+				if (sk_rmem_schedule(newsk, newnp->pktoptions,
+						     newnp->pktoptions->truesize)) {
+					tcp_v6_restore_cb(newnp->pktoptions);
+					skb_set_owner_r(newnp->pktoptions, newsk);
+				} else {
+					__kfree_skb(newnp->pktoptions);
+					newnp->pktoptions = NULL;
+				}
 			}
 		}
 	} else {
@@ -1465,8 +1471,17 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	   by tcp. Feel free to propose better solution.
 					       --ANK (980728)
 	 */
-	if (np->rxopt.all)
+	if (np->rxopt.all) {
 		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
+		if (opt_skb) {
+			if (sk_rmem_schedule(sk, opt_skb, opt_skb->truesize)) {
+				skb_set_owner_r(opt_skb, sk);
+			} else {
+				__kfree_skb(opt_skb);
+				opt_skb = NULL;
+			}
+		}
+	}
 
 	reason = SKB_DROP_REASON_NOT_SPECIFIED;
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
@@ -1552,7 +1567,6 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 		if (np->repflow)
 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
-			skb_set_owner_r(opt_skb, sk);
 			tcp_v6_restore_cb(opt_skb);
 			opt_skb = xchg(&np->pktoptions, opt_skb);
 		} else {
-- 
2.30.2
