Message-ID: <lsq.1453087115.809467038@decadent.org.uk>
Date: Mon, 18 Jan 2016 03:18:35 +0000
From: Ben Hutchings <ben@...adent.org.uk>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org
CC: akpm@...ux-foundation.org
Subject: [PATCH 3.2 45/70] Revert "net: add length argument to skb_copy_and_csum_datagram_iovec"
3.2.76-rc1 review patch. If anyone has any objections, please let me know.
------------------
From: Ben Hutchings <ben@...adent.org.uk>
This reverts commit 127500d724f8c43f452610c9080444eedb5eaa6c. That fixed
the problem of buffer over-reads introduced by backporting commit
89c22d8c3b27 ("net: Fix skb csum races when peeking"), but resulted in
incorrect checksumming for short reads. It will be replaced with a
complete fix.
Signed-off-by: Ben Hutchings <ben@...adent.org.uk>
---
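[Note for reviewers, not part of the commit: a minimal userspace sketch of the
short-read problem described above. This is an illustration only, not kernel
code; csum16() is a toy stand-in for the csum_partial()/csum_fold() pair that
skb_copy_and_csum_datagram_iovec() relies on. It shows that an Internet-style
ones'-complement checksum only verifies when folded over the complete payload,
so clamping the checksummed range to a shorter read length gives the wrong
answer for an otherwise valid packet.]

/*
 * Standalone userspace sketch (not kernel code): csum16() is a toy
 * 16-bit ones'-complement sum, a stand-in for the csum_partial() /
 * csum_fold() pair used by skb_copy_and_csum_datagram_iovec().
 */
#include <stdint.h>
#include <stdio.h>

/* Ones'-complement sum of 'len' bytes, folded to 16 bits, seeded with 'seed'. */
static uint16_t csum16(const uint8_t *data, size_t len, uint32_t seed)
{
	uint32_t sum = seed;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)data[i] << 8) | data[i + 1];
	if (len & 1)
		sum += (uint32_t)data[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t payload[32];
	uint16_t check;
	size_t i;

	for (i = 0; i < sizeof(payload); i++)
		payload[i] = (uint8_t)(i * 7 + 3);

	/* Sender side: the checksum field covers the whole payload. */
	check = (uint16_t)~csum16(payload, sizeof(payload), 0);

	/* Receiver folding over the whole payload gets the all-ones value... */
	printf("full verify:  0x%04x\n", csum16(payload, sizeof(payload), check));

	/* ...but folding over only the first 8 bytes (a short read, i.e. what
	 * a clamped 'chunk' would cover) does not, so a valid packet looks bad. */
	printf("short verify: 0x%04x\n", csum16(payload, 8, check));
	return 0;
}

[The takeaway, as I read the commit message: the checksum can only be
validated over the full datagram, so a fix for the over-read has to keep
checksumming the whole payload even when the caller copies less.]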
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2134,8 +2134,7 @@ extern int skb_copy_datagram_iove
int size);
extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
int hlen,
- struct iovec *iov,
- int len);
+ struct iovec *iov);
extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
int offset,
const struct iovec *from,
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -709,7 +709,6 @@ EXPORT_SYMBOL(__skb_checksum_complete);
* @skb: skbuff
* @hlen: hardware length
* @iov: io vector
- * @len: amount of data to copy from skb to iov
*
* Caller _must_ check that skb will fit to this iovec.
*
@@ -719,14 +718,11 @@ EXPORT_SYMBOL(__skb_checksum_complete);
* can be modified!
*/
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
- int hlen, struct iovec *iov, int len)
+ int hlen, struct iovec *iov)
{
__wsum csum;
int chunk = skb->len - hlen;
- if (chunk > len)
- chunk = len;
-
if (!chunk)
return 0;
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5198,7 +5198,7 @@ static int tcp_copy_to_iovec(struct sock
err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk);
else
err = skb_copy_and_csum_datagram_iovec(skb, hlen,
- tp->ucopy.iov, chunk);
+ tp->ucopy.iov);
if (!err) {
tp->ucopy.len -= chunk;
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1207,7 +1207,7 @@ try_again:
else {
err = skb_copy_and_csum_datagram_iovec(skb,
sizeof(struct udphdr),
- msg->msg_iov, copied);
+ msg->msg_iov);
if (err == -EINVAL)
goto csum_copy_err;
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -479,7 +479,7 @@ static int rawv6_recvmsg(struct kiocb *i
goto csum_copy_err;
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
} else {
- err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
if (err == -EINVAL)
goto csum_copy_err;
}
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -383,8 +383,7 @@ try_again:
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
msg->msg_iov, copied );
else {
- err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr),
- msg->msg_iov, copied);
+ err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
if (err == -EINVAL)
goto csum_copy_err;
}
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -185,8 +185,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, st
msg->msg_iov, copy);
} else {
ret = skb_copy_and_csum_datagram_iovec(skb, offset,
- msg->msg_iov,
- copy);
+ msg->msg_iov);
if (ret == -EINVAL)
goto csum_copy_error;
}