Message-Id: <1228510736-3655-4-git-send-email-ilpo.jarvinen@helsinki.fi>
Date:	Fri,  5 Dec 2008 22:58:49 +0200
From:	"Ilpo Järvinen" <ilpo.jarvinen@...sinki.fi>
To:	David Miller <davem@...emloft.net>
Cc:	netdev@...r.kernel.org,
	"Ilpo Järvinen" <ilpo.jarvinen@...sinki.fi>
Subject: [PATCH 03/10] tcp: make mtu probe failure not break gso'ed skbs unnecessarily

I noticed that with GSO, skb->len has nothing to do with the actual
segment length, so it needs to be figured out separately; reuse
(and generalize) a helper from the recent SACK shifting work.
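
To illustrate the distinction (a minimal sketch, not part of the patch;
the numbers below are hypothetical):

	/* A GSO skb carrying 10 full-sized segments of 1448 bytes:
	 *
	 *	tcp_skb_pcount(skb) == 10
	 *	tcp_skb_mss(skb)    == 1448	(gso_size)
	 *	skb->len            == 14480	(~ pcount * gso_size)
	 *
	 * so the old "skb->len > mss" test in tcp_simple_retransmit()
	 * fires regardless of whether any individual segment exceeds
	 * the current MSS, while the generalized helper reports what
	 * is actually sent on the wire:
	 */
	static int tcp_skb_seglen(struct sk_buff *skb)
	{
		/* non-GSO skbs (pcount == 1) may have gso_size == 0,
		 * so fall back to skb->len for them
		 */
		return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
	}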

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@...sinki.fi>
---
 net/ipv4/tcp_input.c |   19 +++++++------------
 1 files changed, 7 insertions(+), 12 deletions(-)

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 33902f6..21c6701 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1445,14 +1445,9 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
 /* I wish gso_size would have a bit more sane initialization than
  * something-or-zero which complicates things
  */
-static int tcp_shift_mss(struct sk_buff *skb)
+static int tcp_skb_seglen(struct sk_buff *skb)
 {
-	int mss = tcp_skb_mss(skb);
-
-	if (!mss)
-		mss = skb->len;
-
-	return mss;
+	return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
 }
 
 /* Shifting pages past head area doesn't work */
@@ -1503,12 +1498,12 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 	if (in_sack) {
 		len = skb->len;
 		pcount = tcp_skb_pcount(skb);
-		mss = tcp_shift_mss(skb);
+		mss = tcp_skb_seglen(skb);
 
 		/* TODO: Fix DSACKs to not fragment already SACKed and we can
 		 * drop this restriction as unnecessary
 		 */
-		if (mss != tcp_shift_mss(prev))
+		if (mss != tcp_skb_seglen(prev))
 			goto fallback;
 	} else {
 		if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
@@ -1549,7 +1544,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 		/* TODO: Fix DSACKs to not fragment already SACKed and we can
 		 * drop this restriction as unnecessary
 		 */
-		if (mss != tcp_shift_mss(prev))
+		if (mss != tcp_skb_seglen(prev))
 			goto fallback;
 
 		if (len == mss) {
@@ -1578,7 +1573,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 	if (!skb_can_shift(skb) ||
 	    (skb == tcp_send_head(sk)) ||
 	    ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
-	    (mss != tcp_shift_mss(skb)))
+	    (mss != tcp_skb_seglen(skb)))
 		goto out;
 
 	len = skb->len;
@@ -2853,7 +2848,7 @@ void tcp_simple_retransmit(struct sock *sk)
 	tcp_for_write_queue(skb, sk) {
 		if (skb == tcp_send_head(sk))
 			break;
-		if (skb->len > mss &&
+		if (tcp_skb_seglen(skb) > mss &&
 		    !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
 			if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
 				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
-- 
1.5.2.2
