Message-ID: <7b242192-a58d-a4b7-88a6-e04840e75e10@solarflare.com>
Date: Wed, 4 Jul 2018 19:23:50 +0100
From: Edward Cree <ecree@...arflare.com>
To: <davem@...emloft.net>
CC: <netdev@...r.kernel.org>
Subject: [PATCH net-next] net: ipv4: fix drop handling in ip_list_rcv() and
ip_list_rcv_finish()
Since callees (ip_rcv_core() and ip_rcv_finish_core()) might free or steal
the skb, we can't use the list_cut_before() method; we can't even do a
list_del(&skb->list) in the drop case, because skb might have already been
freed and reused.
So instead, take each skb off the source list before processing, and add it
to the sublist afterwards if it wasn't freed or stolen.
Fixes: 5fa12739a53d ("net: ipv4: listify ip_rcv_finish")
Fixes: 17266ee93984 ("net: ipv4: listified version of ip_rcv")
Signed-off-by: Edward Cree <ecree@...arflare.com>
---
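Not part of the patch, but for reviewers who want to see the pattern in
isolation: below is a minimal userspace sketch of the "unlink each entry
before processing, re-add survivors to the sublist" approach.  The
struct list_node helpers are simplified stand-ins for the kernel's
<linux/list.h> circular list, and process() is a hypothetical stand-in for a
callee like ip_rcv_core()/ip_rcv_finish_core() that may consume (free) the
entry; none of these names come from the kernel tree.

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct list_node {
		struct list_node *prev, *next;
	};

	static void list_init(struct list_node *h) { h->prev = h->next = h; }
	static bool list_empty_node(const struct list_node *h) { return h->next == h; }

	static void list_del_node(struct list_node *n)
	{
		n->prev->next = n->next;
		n->next->prev = n->prev;
	}

	static void list_add_tail_node(struct list_node *n, struct list_node *h)
	{
		n->prev = h->prev;
		n->next = h;
		h->prev->next = n;
		h->prev = n;
	}

	struct pkt {
		struct list_node list;
		int id;
	};

	/* Stand-in for a callee that may consume (free) the entry and report
	 * failure, like a dropped or stolen skb.
	 */
	static bool process(struct pkt *p)
	{
		if (p->id % 3 == 0) {	/* pretend every third packet is dropped */
			free(p);
			return false;
		}
		return true;
	}

	static void deliver_sublist(struct list_node *sublist)
	{
		struct list_node *n, *next;

		for (n = sublist->next; n != sublist; n = next) {
			struct pkt *p = (struct pkt *)((char *)n - offsetof(struct pkt, list));

			next = n->next;
			printf("delivered pkt %d\n", p->id);
			free(p);
		}
	}

	int main(void)
	{
		struct list_node head, sublist;
		struct list_node *n, *next;
		int i;

		list_init(&head);
		for (i = 1; i <= 6; i++) {
			struct pkt *p = malloc(sizeof(*p));

			p->id = i;
			list_add_tail_node(&p->list, &head);
		}

		list_init(&sublist);
		for (n = head.next; n != &head; n = next) {
			struct pkt *p = (struct pkt *)((char *)n - offsetof(struct pkt, list));

			next = n->next;		/* remember successor, like _safe iteration */
			list_del_node(n);	/* unlink BEFORE process() may free p */
			if (!process(p))
				continue;	/* p is gone; never touch p->list again */
			list_add_tail_node(&p->list, &sublist);
		}
		if (!list_empty_node(&sublist))
			deliver_sublist(&sublist);
		return 0;
	}

The point of the sketch is that the unlink happens while the node is still
known to be alive, so nothing dereferences p->list (skb->list in the patch)
after the callee has had a chance to free it, whereas a batch splice in the
style of list_cut_before() would.
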
net/ipv4/ip_input.c | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 24b9b0210aeb..14ba628b2761 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -540,24 +540,27 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
 	struct sk_buff *skb, *next;
 	struct list_head sublist;
 
+	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
 		struct dst_entry *dst;
 
+		list_del(&skb->list);
 		if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP)
 			continue;
 
 		dst = skb_dst(skb);
 		if (curr_dst != dst) {
 			/* dispatch old sublist */
-			list_cut_before(&sublist, head, &skb->list);
 			if (!list_empty(&sublist))
 				ip_sublist_rcv_finish(&sublist);
 			/* start new sublist */
+			INIT_LIST_HEAD(&sublist);
 			curr_dst = dst;
 		}
+		list_add_tail(&skb->list, &sublist);
 	}
 	/* dispatch final sublist */
-	ip_sublist_rcv_finish(head);
+	ip_sublist_rcv_finish(&sublist);
 }
 
 static void ip_sublist_rcv(struct list_head *head, struct net_device *dev,
@@ -577,24 +580,27 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt,
 	struct sk_buff *skb, *next;
 	struct list_head sublist;
 
+	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
 		struct net_device *dev = skb->dev;
 		struct net *net = dev_net(dev);
 
+		list_del(&skb->list);
 		skb = ip_rcv_core(skb, net);
 		if (skb == NULL)
 			continue;
 
 		if (curr_dev != dev || curr_net != net) {
 			/* dispatch old sublist */
-			list_cut_before(&sublist, head, &skb->list);
 			if (!list_empty(&sublist))
-				ip_sublist_rcv(&sublist, dev, net);
+				ip_sublist_rcv(&sublist, curr_dev, curr_net);
 			/* start new sublist */
+			INIT_LIST_HEAD(&sublist);
 			curr_dev = dev;
 			curr_net = net;
 		}
+		list_add_tail(&skb->list, &sublist);
 	}
 	/* dispatch final sublist */
-	ip_sublist_rcv(head, curr_dev, curr_net);
+	ip_sublist_rcv(&sublist, curr_dev, curr_net);
 }