Message-ID: <lsq.1500213406.545279174@decadent.org.uk>
Date: Sun, 16 Jul 2017 14:56:46 +0100
From: Ben Hutchings <ben@...adent.org.uk>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org
CC: akpm@...ux-foundation.org,
"Daniel Borkmann" <daniel@...earbox.net>,
"Ashhad Sheikh" <ashhadsheikh394@...il.com>,
"David S. Miller" <davem@...emloft.net>,
"Alexei Starovoitov" <ast@...nel.org>
Subject: [PATCH 3.16 074/178] bpf: try harder on clones when writing into skb

3.16.46-rc1 review patch. If anyone has any objections, please let me know.

------------------

From: Daniel Borkmann <daniel@...earbox.net>
commit 3697649ff29e0f647565eed04b27a7779c646a22 upstream.

When we're dealing with clones and the area is not writable, try
harder and get a copy via pskb_expand_head(). Also replace the other
occurrences in tc actions with the new skb_try_make_writable().

Reported-by: Ashhad Sheikh <ashhadsheikh394@...il.com>
Signed-off-by: Daniel Borkmann <daniel@...earbox.net>
Acked-by: Alexei Starovoitov <ast@...nel.org>
Signed-off-by: David S. Miller <davem@...emloft.net>
[bwh: Backported to 3.16: drop changes to bpf; only tc actions need fixing]
Signed-off-by: Ben Hutchings <ben@...adent.org.uk>
---
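Note for reviewers: the new helper has an inverted return convention.
skb_try_make_writable() returns non-zero on *failure*, i.e. when the
skb is cloned, the first write_len bytes are not clone-writable, and
pskb_expand_head() could not obtain a private copy; callers therefore
bail out when it returns true. A minimal, hypothetical caller sketch
follows (illustration only, not part of this patch; example_rewrite_ttl
is an invented name, while pskb_may_pull(), ip_hdr() and csum_replace2()
are existing kernel helpers):

#include <linux/skbuff.h>
#include <net/checksum.h>
#include <net/ip.h>

/* Rewrite the IPv4 TTL of an skb that may be a shared clone. */
static int example_rewrite_ttl(struct sk_buff *skb, u8 new_ttl)
{
	int noff = skb_network_offset(skb);
	struct iphdr *iph;

	/* The header must be in the linear area and privately writable;
	 * skb_try_make_writable() returning non-zero means we could not
	 * get a writable copy, so give up.
	 */
	if (!pskb_may_pull(skb, sizeof(*iph) + noff) ||
	    skb_try_make_writable(skb, sizeof(*iph) + noff))
		return -ENOMEM;

	iph = ip_hdr(skb);
	/* Patch the checksum for the 16-bit word that holds the TTL. */
	csum_replace2(&iph->check, htons(iph->ttl << 8),
		      htons(new_ttl << 8));
	iph->ttl = new_ttl;
	return 0;
}

The converted tc actions below follow exactly this pattern.
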
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2307,6 +2307,13 @@ static inline int skb_clone_writable(con
 	       skb_headroom(skb) + len <= skb->hdr_len;
 }
 
+static inline int skb_try_make_writable(struct sk_buff *skb,
+					unsigned int write_len)
+{
+	return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
+	       pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
+
 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
 			    int cloned)
 {
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -104,9 +104,7 @@ static void *tcf_csum_skb_nextlayer(stru
 	int hl = ihl + jhl;
 
 	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
-	    (skb_cloned(skb) &&
-	     !skb_clone_writable(skb, hl + ntkoff) &&
-	     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+	    skb_try_make_writable(skb, hl + ntkoff))
 		return NULL;
 	else
 		return (void *)(skb_network_header(skb) + ihl);
@@ -364,9 +362,7 @@ static int tcf_csum_ipv4(struct sk_buff
 	}
 
 	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
-		if (skb_cloned(skb) &&
-		    !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
-		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
 			goto fail;
 
 		ip_send_check(ip_hdr(skb));
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -125,9 +125,7 @@ static int tcf_nat(struct sk_buff *skb,
 		addr = iph->daddr;
 
 	if (!((old_addr ^ addr) & mask)) {
-		if (skb_cloned(skb) &&
-		    !skb_clone_writable(skb, sizeof(*iph) + noff) &&
-		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+		if (skb_try_make_writable(skb, sizeof(*iph) + noff))
 			goto drop;
 
 		new_addr &= mask;
@@ -155,9 +153,7 @@ static int tcf_nat(struct sk_buff *skb,
 		struct tcphdr *tcph;
 
 		if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
-		    (skb_cloned(skb) &&
-		     !skb_clone_writable(skb, ihl + sizeof(*tcph) + noff) &&
-		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+		    skb_try_make_writable(skb, ihl + sizeof(*tcph) + noff))
 			goto drop;
 
 		tcph = (void *)(skb_network_header(skb) + ihl);
@@ -169,9 +165,7 @@ static int tcf_nat(struct sk_buff *skb,
 		struct udphdr *udph;
 
 		if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
-		    (skb_cloned(skb) &&
-		     !skb_clone_writable(skb, ihl + sizeof(*udph) + noff) &&
-		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+		    skb_try_make_writable(skb, ihl + sizeof(*udph) + noff))
 			goto drop;
 
 		udph = (void *)(skb_network_header(skb) + ihl);
@@ -211,10 +205,8 @@ static int tcf_nat(struct sk_buff *skb,
 		if ((old_addr ^ addr) & mask)
 			break;
 
-		if (skb_cloned(skb) &&
-		    !skb_clone_writable(skb, ihl + sizeof(*icmph) +
-					     sizeof(*iph) + noff) &&
-		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+		if (skb_try_make_writable(skb, ihl + sizeof(*icmph) +
+					  sizeof(*iph) + noff))
 			goto drop;
 
 		icmph = (void *)(skb_network_header(skb) + ihl);