Message-ID: <20140116074209.GB20993@redhat.com>
Date:	Thu, 16 Jan 2014 09:42:09 +0200
From:	"Michael S. Tsirkin" <mst@...hat.com>
To:	Jason Wang <jasowang@...hat.com>
Cc:	davem@...emloft.net, virtualization@...ts.linux-foundation.org,
	netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
	Rusty Russell <rusty@...tcorp.com.au>
Subject: Re: [PATCH net-next] virtio-net: drop rq->max and rq->num

On Thu, Jan 16, 2014 at 02:45:24PM +0800, Jason Wang wrote:
> It looks like there's no need for those two fields:
> 
> - Unless the first refill attempt fails, rq->max is always equal to the
>   vring size.
> - rq->num is only used to decide when a refill is needed; we can check
>   vq->num_free instead.
> - rq->num had to be incremented or decremented explicitly after each
>   get/put, which makes for a bad API.
> 
> So this patch removes them both to make the code simpler.
> 
> Cc: Rusty Russell <rusty@...tcorp.com.au>
> Cc: Michael S. Tsirkin <mst@...hat.com>
> Signed-off-by: Jason Wang <jasowang@...hat.com>
> Acked-by: Rusty Russell <rusty@...tcorp.com.au>


Acked-by: Michael S. Tsirkin <mst@...hat.com>
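
For anyone following the second point above: the refill-threshold rewrite
relies on the fact that the number of posted buffers equals the vring size
minus vq->num_free. A minimal sketch of the equivalence (needs_refill is a
hypothetical helper, not part of the patch):

/* Sketch only: buffers in flight == vring_size - num_free, so with
 * rq->max == vring size the old test  rq->num < rq->max / 2
 * is the same as  vq->num_free > vring_size / 2.
 */
static bool needs_refill(struct virtqueue *vq)	/* hypothetical helper */
{
	unsigned int vring_size = virtqueue_get_vring_size(vq);

	return vq->num_free > vring_size / 2;
}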

> ---
>  drivers/net/virtio_net.c |   16 +++-------------
>  1 files changed, 3 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 7b17240..9bd70aa 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -72,9 +72,6 @@ struct receive_queue {
>  
>  	struct napi_struct napi;
>  
> -	/* Number of input buffers, and max we've ever had. */
> -	unsigned int num, max;
> -
>  	/* Chain pages by the private ptr. */
>  	struct page *pages;
>  
> @@ -360,7 +357,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
>  		}
>  
>  		page = virt_to_head_page(buf);
> -		--rq->num;
>  
>  		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
>  		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
> @@ -406,7 +402,6 @@ err_skb:
>  		}
>  		page = virt_to_head_page(buf);
>  		put_page(page);
> -		--rq->num;
>  	}
>  err_buf:
>  	dev->stats.rx_dropped++;
> @@ -628,10 +623,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
>  		oom = err == -ENOMEM;
>  		if (err)
>  			break;
> -		++rq->num;
>  	} while (rq->vq->num_free);
> -	if (unlikely(rq->num > rq->max))
> -		rq->max = rq->num;
>  	if (unlikely(!virtqueue_kick(rq->vq)))
>  		return false;
>  	return !oom;
> @@ -699,11 +691,10 @@ again:
>  	while (received < budget &&
>  	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
>  		receive_buf(rq, buf, len);
> -		--rq->num;
>  		received++;
>  	}
>  
> -	if (rq->num < rq->max / 2) {
> +	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
>  		if (!try_fill_recv(rq, GFP_ATOMIC))
>  			schedule_delayed_work(&vi->refill, 0);
>  	}
> @@ -1398,9 +1389,7 @@ static void free_unused_bufs(struct virtnet_info *vi)
>  				give_pages(&vi->rq[i], buf);
>  			else
>  				dev_kfree_skb(buf);
> -			--vi->rq[i].num;
>  		}
> -		BUG_ON(vi->rq[i].num != 0);
>  	}
>  }
>  
> @@ -1671,7 +1660,8 @@ static int virtnet_probe(struct virtio_device *vdev)
>  		try_fill_recv(&vi->rq[i], GFP_KERNEL);
>  
>  		/* If we didn't even get one input buffer, we're useless. */
> -		if (vi->rq[i].num == 0) {
> +		if (vi->rq[i].vq->num_free ==
> +		    virtqueue_get_vring_size(vi->rq[i].vq)) {
>  			free_unused_bufs(vi);
>  			err = -ENOMEM;
>  			goto free_recv_bufs;
> -- 
> 1.7.1
