Message-ID: <ed9d91da-621d-5fbf-b836-76c80db013c7@redhat.com>
Date:   Mon, 6 Feb 2017 14:48:42 +0800
From:   Jason Wang <jasowang@...hat.com>
To:     John Fastabend <john.fastabend@...il.com>, kubakici@...pl,
        ast@...com, mst@...hat.com
Cc:     john.r.fastabend@...el.com, netdev@...r.kernel.org
Subject: Re: [net-next PATCH v2 1/5] virtio_net: wrap rtnl_lock in test for
 calling with lock already held



On 2017-02-03 11:14, John Fastabend wrote:
> For the XDP use case, and to allow ethtool reset tests, it is useful to
> be able to use the reset paths from contexts where the rtnl lock is
> already held.
>
> This requires updating virtnet_set_queues and free_receive_bufs, the
> two places where rtnl_lock is taken in virtio_net. To do this we
> use the following pattern:
>
> 	_foo(...) { do stuff }
> 	foo(...) { rtnl_lock(); _foo(...); rtnl_unlock(); }
>
> This allows us to use the freeze()/restore() flow from both contexts.
>
> Signed-off-by: John Fastabend <john.r.fastabend@...el.com>

Acked-by: Jason Wang <jasowang@...hat.com>
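
For anyone reading along who is less familiar with this locking
convention, here is a rough userspace sketch of the same
take-the-lock-in-the-wrapper pattern. A pthread mutex stands in for
rtnl_lock(), and the names (struct dev, _set_queues()/set_queues()) are
hypothetical; they only mirror the shape of the patch, not the actual
virtio_net code quoted below.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the global rtnl lock (hypothetical userspace analogue). */
static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

struct dev { int queue_pairs; };

/* _set_queues(): does the real work; caller must already hold cfg_lock. */
static int _set_queues(struct dev *d, int queue_pairs)
{
	d->queue_pairs = queue_pairs;
	return 0;
}

/* set_queues(): convenience wrapper that takes and drops the lock itself. */
static int set_queues(struct dev *d, int queue_pairs)
{
	int err;

	pthread_mutex_lock(&cfg_lock);
	err = _set_queues(d, queue_pairs);
	pthread_mutex_unlock(&cfg_lock);
	return err;
}

int main(void)
{
	struct dev d = { .queue_pairs = 1 };

	/* A path that does not hold the lock uses the locked wrapper... */
	set_queues(&d, 4);

	/* ...while a path that already holds it calls the bare helper. */
	pthread_mutex_lock(&cfg_lock);
	_set_queues(&d, 2);
	pthread_mutex_unlock(&cfg_lock);

	printf("queue_pairs = %d\n", d.queue_pairs);
	return 0;
}

Callers that already hold the lock use the underscore helper directly;
everything else goes through the locked wrapper, exactly as
virtnet_set_queues()/_virtnet_set_queues() do in the diff below.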

> ---
>   drivers/net/virtio_net.c |   31 +++++++++++++++++++++----------
>   1 file changed, 21 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index bd22cf3..f8ba586 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -1342,7 +1342,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
>   	rtnl_unlock();
>   }
>   
> -static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
> +static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
>   {
>   	struct scatterlist sg;
>   	struct net_device *dev = vi->dev;
> @@ -1368,6 +1368,16 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
>   	return 0;
>   }
>   
> +static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
> +{
> +	int err;
> +
> +	rtnl_lock();
> +	err = _virtnet_set_queues(vi, queue_pairs);
> +	rtnl_unlock();
> +	return err;
> +}
> +
>   static int virtnet_close(struct net_device *dev)
>   {
>   	struct virtnet_info *vi = netdev_priv(dev);
> @@ -1620,7 +1630,7 @@ static int virtnet_set_channels(struct net_device *dev,
>   		return -EINVAL;
>   
>   	get_online_cpus();
> -	err = virtnet_set_queues(vi, queue_pairs);
> +	err = _virtnet_set_queues(vi, queue_pairs);
>   	if (!err) {
>   		netif_set_real_num_tx_queues(dev, queue_pairs);
>   		netif_set_real_num_rx_queues(dev, queue_pairs);
> @@ -1752,7 +1762,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
>   		return -ENOMEM;
>   	}
>   
> -	err = virtnet_set_queues(vi, curr_qp + xdp_qp);
> +	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
>   	if (err) {
>   		dev_warn(&dev->dev, "XDP Device queue allocation failure.\n");
>   		return err;
> @@ -1761,7 +1771,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
>   	if (prog) {
>   		prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
>   		if (IS_ERR(prog)) {
> -			virtnet_set_queues(vi, curr_qp);
> +			_virtnet_set_queues(vi, curr_qp);
>   			return PTR_ERR(prog);
>   		}
>   	}
> @@ -1880,12 +1890,11 @@ static void virtnet_free_queues(struct virtnet_info *vi)
>   	kfree(vi->sq);
>   }
>   
> -static void free_receive_bufs(struct virtnet_info *vi)
> +static void _free_receive_bufs(struct virtnet_info *vi)
>   {
>   	struct bpf_prog *old_prog;
>   	int i;
>   
> -	rtnl_lock();
>   	for (i = 0; i < vi->max_queue_pairs; i++) {
>   		while (vi->rq[i].pages)
>   			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
> @@ -1895,6 +1904,12 @@ static void free_receive_bufs(struct virtnet_info *vi)
>   		if (old_prog)
>   			bpf_prog_put(old_prog);
>   	}
> +}
> +
> +static void free_receive_bufs(struct virtnet_info *vi)
> +{
> +	rtnl_lock();
> +	_free_receive_bufs(vi);
>   	rtnl_unlock();
>   }
>   
> @@ -2333,9 +2348,7 @@ static int virtnet_probe(struct virtio_device *vdev)
>   		goto free_unregister_netdev;
>   	}
>   
> -	rtnl_lock();
>   	virtnet_set_queues(vi, vi->curr_queue_pairs);
> -	rtnl_unlock();
>   
>   	/* Assume link up if device can't report link status,
>   	   otherwise get link status from config. */
> @@ -2444,9 +2457,7 @@ static int virtnet_restore(struct virtio_device *vdev)
>   
>   	netif_device_attach(vi->dev);
>   
> -	rtnl_lock();
>   	virtnet_set_queues(vi, vi->curr_queue_pairs);
> -	rtnl_unlock();
>   
>   	err = virtnet_cpu_notif_add(vi);
>   	if (err)
>
