Message-ID: <20220713205509.2a79563a@kernel.org>
Date: Wed, 13 Jul 2022 20:55:09 -0700
From: Jakub Kicinski <kuba@...nel.org>
To: Taehee Yoo <ap420073@...il.com>
Cc: davem@...emloft.net, pabeni@...hat.com, edumazet@...gle.com,
netdev@...r.kernel.org
Subject: Re: [PATCH net 1/8] amt: use workqueue for gateway side message
handling
On Tue, 12 Jul 2022 10:57:07 +0000 Taehee Yoo wrote:
> @@ -2392,12 +2429,14 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
> skb->pkt_type = PACKET_MULTICAST;
> skb->ip_summed = CHECKSUM_NONE;
> len = skb->len;
> + rcu_read_lock_bh();
> if (__netif_rx(skb) == NET_RX_SUCCESS) {
> amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
> dev_sw_netstats_rx_add(amt->dev, len);
> } else {
> amt->dev->stats.rx_dropped++;
> }
> + rcu_read_unlock_bh();
>
> return false;
> }
The RCU lock addition looks potentially unrelated?
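FWIW, if the lock was added because this path now runs from a workqueue
(process context) rather than from the encap Rx softirq path, note that
__netif_rx() expects BH to already be disabled; the plain netif_rx()
wrapper takes care of that itself. Rough sketch of the core wrapper
(paraphrased from net/core/dev.c, may not match your tree exactly):

	int netif_rx(struct sk_buff *skb)
	{
		int ret;

		local_bh_disable();
		ret = __netif_rx(skb);
		local_bh_enable();

		return ret;
	}

If BH protection is all that is needed here, calling netif_rx() directly
may be simpler than wrapping __netif_rx() in rcu_read_lock_bh().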
> @@ -2892,10 +3007,21 @@ static int amt_dev_stop(struct net_device *dev)
> struct amt_dev *amt = netdev_priv(dev);
> struct amt_tunnel_list *tunnel, *tmp;
> struct socket *sock;
> + struct sk_buff *skb;
> + int i;
>
> cancel_delayed_work_sync(&amt->req_wq);
> cancel_delayed_work_sync(&amt->discovery_wq);
> cancel_delayed_work_sync(&amt->secret_wq);
> + cancel_work_sync(&amt->event_wq);
Are you sure the work will not get scheduled again?
What has stopped packet Rx at this point? (See the ordering sketch
below this hunk.)
> + for (i = 0; i < AMT_MAX_EVENTS; i++) {
> + skb = amt->events[i].skb;
> + if (skb)
> + kfree_skb(skb);
> + amt->events[i].event = AMT_EVENT_NONE;
> + amt->events[i].skb = NULL;
> + }
>
> /* shutdown */
> sock = rtnl_dereference(amt->sock);
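On the ordering question above: the usual pattern is to quiesce the Rx
path first, so nothing can schedule_work(&amt->event_wq) anymore, and
only then cancel the work and free the stashed skbs. Very rough sketch
of that ordering, reusing the fields and locals from this patch but
hand-waving the actual socket release:

	/* 1. detach the socket so the encap Rx handler can no longer
	 *    queue new events
	 */
	sock = rtnl_dereference(amt->sock);
	RCU_INIT_POINTER(amt->sock, NULL);
	synchronize_net();

	/* 2. now the work cannot be re-queued, cancel it */
	cancel_work_sync(&amt->event_wq);

	/* 3. only then free whatever events were still pending */
	for (i = 0; i < AMT_MAX_EVENTS; i++) {
		if (amt->events[i].skb)
			kfree_skb(amt->events[i].skb);
		amt->events[i].skb = NULL;
		amt->events[i].event = AMT_EVENT_NONE;
	}

Not saying this is the exact fix, just that the cancel has to come
after whatever stops Rx, not before it.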
> @@ -3051,6 +3177,8 @@ static int amt_newlink(struct net *net, struct net_device *dev,
> amt->max_tunnels = AMT_MAX_TUNNELS;
>
> spin_lock_init(&amt->lock);
> + amt->event_idx = 0;
> + amt->nr_events = 0;
no need to init members of netdev_priv() to 0, the priv area is
zalloc'ed (see the note below this hunk)
> amt->max_groups = AMT_MAX_GROUP;
> amt->max_sources = AMT_MAX_SOURCE;
> amt->hash_buckets = AMT_HSIZE;
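To expand on the zalloc comment: for rtnl_link devices the core
allocates the netdev together with its priv area in one zeroed
allocation, so every field reachable through netdev_priv() is already 0
by the time ->newlink() runs. Roughly (paraphrased from
alloc_netdev_mqs() in net/core/dev.c, details may differ by version):

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);

So the two assignments above can simply be dropped.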