Message-ID: <168198515529.808959.12962138073127060724.stgit@firesoul>
Date: Thu, 20 Apr 2023 12:05:55 +0200
From: Jesper Dangaard Brouer <brouer@...hat.com>
To: netdev@...r.kernel.org, edumazet@...gle.com
Cc: Jesper Dangaard Brouer <brouer@...hat.com>, pabeni@...hat.com,
kuba@...nel.org, hawk@...nel.org, davem@...emloft.net,
lorenzo@...nel.org
Subject: [PATCH net-next V1] net: flush sd->defer_list on unregister_netdevice
When removing a net_device (that uses NAPI), the sd->defer_list
system can still hold on to SKBs that carry a dst_entry, which in
turn can hold a netdev_hold reference on the device.
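
For context, SKBs land on this per-CPU list via skb_attempt_defer_free()
in net/core/skbuff.c, which relies on the remote CPU's net_rx_action()
to free them later. A simplified sketch of that deferral path (the
defer_max limit and the IPI kick are omitted; this is not part of this
patch):

	void skb_attempt_defer_free(struct sk_buff *skb)
	{
		int cpu = skb->alloc_cpu;
		struct softnet_data *sd;
		unsigned long flags;

		/* Free locally if we cannot defer to the allocating CPU */
		if (cpu >= nr_cpu_ids || !cpu_online(cpu) ||
		    cpu == raw_smp_processor_id()) {
			__kfree_skb(skb);
			return;
		}

		sd = &per_cpu(softnet_data, cpu);

		spin_lock_irqsave(&sd->defer_lock, flags);
		/* Queue the skb; it is only freed once the remote CPU
		 * runs skb_defer_free_flush(), normally from
		 * net_rx_action().
		 */
		skb->next = sd->defer_list;
		/* Paired with READ_ONCE() in skb_defer_free_flush() */
		WRITE_ONCE(sd->defer_list, skb);
		sd->defer_count++;
		spin_unlock_irqrestore(&sd->defer_lock, flags);
	}

If the device is unregistered before that remote flush happens, the
queued SKBs keep their dst/netdev references alive.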
Choose the simple solution of flushing the softnet_data defer_list
as part of the unregister_netdevice flush_all_backlogs() step.
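
For reference, the unregister path reaches this through the existing
per-CPU backlog flush machinery, roughly (simplified sketch, not part
of this patch):

	/* unregister_netdevice_many() -> flush_all_backlogs() */
	static void flush_all_backlogs(void)
	{
		unsigned int cpu;

		cpus_read_lock();
		for_each_online_cpu(cpu)
			if (flush_required(cpu))
				queue_work_on(cpu, system_highpri_wq,
					      per_cpu_ptr(&flush_works, cpu));

		/* Wait for the queued flush_backlog() workers */
		for_each_online_cpu(cpu)
			flush_work(per_cpu_ptr(&flush_works, cpu));
		cpus_read_unlock();
	}

This is why flush_required() below also has to report a non-empty
defer_list, and why skb_defer_free_flush() gains a napi_safe argument:
flush_backlog() runs from a workqueue, not from the NET_RX softirq.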
Signed-off-by: Jesper Dangaard Brouer <brouer@...hat.com>
---
net/core/dev.c | 26 +++++++++++++++++++++-----
1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 3fc4dba71f9d..b63edd2c21e4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5780,6 +5780,8 @@ EXPORT_SYMBOL(netif_receive_skb_list);
static DEFINE_PER_CPU(struct work_struct, flush_works);
+static void skb_defer_free_flush(struct softnet_data *sd, bool napi_safe);
+
/* Network device is going away, flush any packets still pending */
static void flush_backlog(struct work_struct *work)
{
@@ -5806,6 +5808,8 @@ static void flush_backlog(struct work_struct *work)
input_queue_head_incr(sd);
}
}
+
+ skb_defer_free_flush(sd, false);
local_bh_enable();
}
@@ -5824,6 +5828,9 @@ static bool flush_required(int cpu)
!skb_queue_empty_lockless(&sd->process_queue);
rps_unlock_irq_enable(sd);
+ if (READ_ONCE(sd->defer_list))
+ do_flush = true;
+
return do_flush;
#endif
/* without RPS we can't safely check input_pkt_queue: during a
@@ -6623,23 +6630,32 @@ static int napi_threaded_poll(void *data)
return 0;
}
-static void skb_defer_free_flush(struct softnet_data *sd)
+static void skb_defer_free_flush(struct softnet_data *sd, bool napi_safe)
{
struct sk_buff *skb, *next;
+ unsigned long flags;
/* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
if (!READ_ONCE(sd->defer_list))
return;
- spin_lock_irq(&sd->defer_lock);
+ if (napi_safe)
+ spin_lock_irq(&sd->defer_lock);
+ else
+ spin_lock_irqsave(&sd->defer_lock, flags);
+
skb = sd->defer_list;
sd->defer_list = NULL;
sd->defer_count = 0;
- spin_unlock_irq(&sd->defer_lock);
+
+ if (napi_safe)
+ spin_unlock_irq(&sd->defer_lock);
+ else
+ spin_unlock_irqrestore(&sd->defer_lock, flags);
while (skb != NULL) {
next = skb->next;
- napi_consume_skb(skb, 1);
+ napi_consume_skb(skb, napi_safe);
skb = next;
}
}
@@ -6662,7 +6678,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
for (;;) {
struct napi_struct *n;
- skb_defer_free_flush(sd);
+ skb_defer_free_flush(sd, true);
if (list_empty(&list)) {
if (list_empty(&repoll)) {