[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <5181fa43-4d7f-8db0-0795-96563f8b1dd3@redhat.com>
Date: Wed, 27 Jun 2018 22:56:58 +0800
From: Jason Wang <jasowang@...hat.com>
To: Jesper Dangaard Brouer <brouer@...hat.com>, netdev@...r.kernel.org
Cc: John Fastabend <john.fastabend@...il.com>,
Daniel Borkmann <borkmann@...earbox.net>,
Björn Töpel <bjorn.topel@...el.com>,
Alexei Starovoitov <alexei.starovoitov@...il.com>
Subject: Re: [PATCH 3/3] virtio_net: split XDP_TX kick and XDP_REDIRECT map
flushing
On 2018年06月26日 23:39, Jesper Dangaard Brouer wrote:
> The driver was combining XDP_TX virtqueue_kick and XDP_REDIRECT
> map flushing (xdp_do_flush_map). This is suboptimal; these two
> flush operations should be kept separate.
>
> The suboptimal behavior was introduced in commit 9267c430c6b6
> ("virtio-net: add missing virtqueue kick when flushing packets").
>
> Fixes: 9267c430c6b6 ("virtio-net: add missing virtqueue kick when flushing packets")
> Signed-off-by: Jesper Dangaard Brouer <brouer@...hat.com>
> ---
> drivers/net/virtio_net.c | 30 +++++++++++++++++++-----------
> 1 file changed, 19 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 1619ee3070b6..ae47ecf80c2d 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -53,6 +53,10 @@ module_param(napi_tx, bool, 0644);
> /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
> #define VIRTIO_XDP_HEADROOM 256
>
> +/* Separating two types of XDP xmit */
> +#define VIRTIO_XDP_TX BIT(0)
> +#define VIRTIO_XDP_REDIR BIT(1)
> +
> /* RX packet size EWMA. The average packet size is used to determine the packet
> * buffer size when refilling RX rings. As the entire RX ring may be refilled
> * at once, the weight is chosen so that the EWMA will be insensitive to short-
> @@ -582,7 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
> struct receive_queue *rq,
> void *buf, void *ctx,
> unsigned int len,
> - bool *xdp_xmit)
> + unsigned int *xdp_xmit)
> {
> struct sk_buff *skb;
> struct bpf_prog *xdp_prog;
> @@ -654,14 +658,14 @@ static struct sk_buff *receive_small(struct net_device *dev,
> trace_xdp_exception(vi->dev, xdp_prog, act);
> goto err_xdp;
> }
> - *xdp_xmit = true;
> + *xdp_xmit |= VIRTIO_XDP_TX;
> rcu_read_unlock();
> goto xdp_xmit;
> case XDP_REDIRECT:
> err = xdp_do_redirect(dev, &xdp, xdp_prog);
> if (err)
> goto err_xdp;
> - *xdp_xmit = true;
> + *xdp_xmit |= VIRTIO_XDP_REDIR;
> rcu_read_unlock();
> goto xdp_xmit;
> default:
> @@ -723,7 +727,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> void *buf,
> void *ctx,
> unsigned int len,
> - bool *xdp_xmit)
> + unsigned int *xdp_xmit)
> {
> struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
> u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
> @@ -818,7 +822,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> put_page(xdp_page);
> goto err_xdp;
> }
> - *xdp_xmit = true;
> + *xdp_xmit |= VIRTIO_XDP_TX;
> if (unlikely(xdp_page != page))
> put_page(page);
> rcu_read_unlock();
> @@ -830,7 +834,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> put_page(xdp_page);
> goto err_xdp;
> }
> - *xdp_xmit = true;
> + *xdp_xmit |= VIRTIO_XDP_REDIR;
> if (unlikely(xdp_page != page))
> put_page(page);
> rcu_read_unlock();
> @@ -939,7 +943,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
> }
>
> static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
> - void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
> + void *buf, unsigned int len, void **ctx,
> + unsigned int *xdp_xmit)
> {
> struct net_device *dev = vi->dev;
> struct sk_buff *skb;
> @@ -1232,7 +1237,8 @@ static void refill_work(struct work_struct *work)
> }
> }
>
> -static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
> +static int virtnet_receive(struct receive_queue *rq, int budget,
> + unsigned int *xdp_xmit)
> {
> struct virtnet_info *vi = rq->vq->vdev->priv;
> unsigned int len, received = 0, bytes = 0;
> @@ -1321,7 +1327,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
> struct virtnet_info *vi = rq->vq->vdev->priv;
> struct send_queue *sq;
> unsigned int received, qp;
> - bool xdp_xmit = false;
> + unsigned int xdp_xmit = 0;
>
> virtnet_poll_cleantx(rq);
>
> @@ -1331,12 +1337,14 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
> if (received < budget)
> virtqueue_napi_complete(napi, rq->vq, received);
>
> - if (xdp_xmit) {
> + if (xdp_xmit & VIRTIO_XDP_REDIR)
> + xdp_do_flush_map();
> +
> + if (xdp_xmit & VIRTIO_XDP_TX) {
> qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
> smp_processor_id();
> sq = &vi->sq[qp];
> virtqueue_kick(sq->vq);
> - xdp_do_flush_map();
> }
>
> return received;
>
Acked-by: Jason Wang <jasowang@...hat.com>
Thanks
Powered by blists - more mailing lists