Message-ID: <CACGkMEsoA_y6FV0PzoLfO-UFhJrYRe96cDpX_hHgSo7PAwshrQ@mail.gmail.com>
Date: Fri, 20 Oct 2023 14:52:18 +0800
From: Jason Wang <jasowang@...hat.com>
To: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
Cc: netdev@...r.kernel.org, "David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
"Michael S. Tsirkin" <mst@...hat.com>, Alexei Starovoitov <ast@...nel.org>, Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>, John Fastabend <john.fastabend@...il.com>,
virtualization@...ts.linux-foundation.org, bpf@...r.kernel.org
Subject: Re: [PATCH net-next v1 12/19] virtio_net: xsk: tx: support wakeup
On Mon, Oct 16, 2023 at 8:01 PM Xuan Zhuo <xuanzhuo@...ux.alibaba.com> wrote:
>
> xsk wakeup is called by the xsk framework or by the user to trigger the
> xsk xmit logic.
>
> Virtio-net cannot actively generate an interrupt, so it instead tries to
> trigger tx NAPI on the CPU that handles the tx interrupt.
>
> This is done for cache locality: the tx interrupt is generally pinned to
> one CPU, so it is better to run TX NAPI on that same CPU.
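
(Side note for context: in the usual AF_XDP flow this wakeup is what
userspace triggers with sendto() on the XSK socket fd when the kernel has
set the TX ring's need_wakeup flag. A minimal, illustrative libbpf/libxdp
style fragment, with xsk_fd and tx as placeholder names, not part of this
patch:

        /* Kick the kernel only when it asked to be woken up. */
        if (xsk_ring_prod__needs_wakeup(&tx))
                sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);

That sendto() is what ends up in the driver's ndo_xsk_wakeup below.)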
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@...ux.alibaba.com>
> ---
>  drivers/net/virtio/main.c       |  3 ++
>  drivers/net/virtio/virtio_net.h |  8 +++++
>  drivers/net/virtio/xsk.c        | 57 +++++++++++++++++++++++++++++++++
>  drivers/net/virtio/xsk.h        |  1 +
>  4 files changed, 69 insertions(+)
>
> diff --git a/drivers/net/virtio/main.c b/drivers/net/virtio/main.c
> index a08429bef61f..1a222221352e 100644
> --- a/drivers/net/virtio/main.c
> +++ b/drivers/net/virtio/main.c
> @@ -2066,6 +2066,8 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
>                  return 0;
>          }
>
> +        sq->xsk.last_cpu = smp_processor_id();
> +
>          txq = netdev_get_tx_queue(vi->dev, index);
>          __netif_tx_lock(txq, raw_smp_processor_id());
>          virtqueue_disable_cb(sq->vq);
> @@ -3770,6 +3772,7 @@ static const struct net_device_ops virtnet_netdev = {
>          .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
>          .ndo_bpf = virtnet_xdp,
>          .ndo_xdp_xmit = virtnet_xdp_xmit,
> +        .ndo_xsk_wakeup = virtnet_xsk_wakeup,
>          .ndo_features_check = passthru_features_check,
>          .ndo_get_phys_port_name = virtnet_get_phys_port_name,
>          .ndo_set_features = virtnet_set_features,
> diff --git a/drivers/net/virtio/virtio_net.h b/drivers/net/virtio/virtio_net.h
> index 3bbb1f5baad5..7c72a8bb1813 100644
> --- a/drivers/net/virtio/virtio_net.h
> +++ b/drivers/net/virtio/virtio_net.h
> @@ -101,6 +101,14 @@ struct virtnet_sq {
>                  struct xsk_buff_pool __rcu *pool;
>
>                  dma_addr_t hdr_dma_address;
> +
> +                u32 last_cpu;
> +                struct __call_single_data csd;
> +
> +                /* Prevent concurrent calls to
> +                 * smp_call_function_single_async() on this csd.
> +                 */
> +                spinlock_t ipi_lock;
>          } xsk;
>  };
>
> diff --git a/drivers/net/virtio/xsk.c b/drivers/net/virtio/xsk.c
> index 0e775a9d270f..973e783260c3 100644
> --- a/drivers/net/virtio/xsk.c
> +++ b/drivers/net/virtio/xsk.c
> @@ -115,6 +115,60 @@ bool virtnet_xsk_xmit(struct virtnet_sq *sq, struct xsk_buff_pool *pool,
>          return sent == budget;
>  }
>
> +static void virtnet_remote_napi_schedule(void *info)
> +{
> +        struct virtnet_sq *sq = info;
> +
> +        virtnet_vq_napi_schedule(&sq->napi, sq->vq);
> +}
> +
> +static void virtnet_remote_raise_napi(struct virtnet_sq *sq)
> +{
> +        u32 last_cpu, cur_cpu;
> +
> +        last_cpu = sq->xsk.last_cpu;
> +        cur_cpu = get_cpu();
> +
> +        /* On a remote CPU, the softirq runs automatically when the IPI
> +         * exits. On the local CPU, smp_call_function_single_async() does
> +         * not raise an IPI, so the softirq is not triggered automatically;
> +         * call local_bh_enable() afterwards to run softirq processing.
> +         */
> +        if (last_cpu == cur_cpu) {
> +                local_bh_disable();
> +                virtnet_vq_napi_schedule(&sq->napi, sq->vq);
> +                local_bh_enable();
> +        } else {
> +                if (spin_trylock(&sq->xsk.ipi_lock)) {
> +                        smp_call_function_single_async(last_cpu, &sq->xsk.csd);
> +                        spin_unlock(&sq->xsk.ipi_lock);
> +                }
> +        }

Is there any number showing whether the IPI is worth it here? For
example, GVE doesn't do this.
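
To illustrate what I mean, a rough, untested sketch (not a formal
suggestion) that reuses napi_if_scheduled_mark_missed() and the
virtnet_vq_napi_schedule() helper this series already adds: a wakeup that
simply schedules TX NAPI on the local CPU needs none of the csd/ipi_lock
machinery:

int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
{
        struct virtnet_info *vi = netdev_priv(dev);
        struct virtnet_sq *sq;

        if (!netif_running(dev))
                return -ENETDOWN;

        if (qid >= vi->curr_queue_pairs)
                return -EINVAL;

        sq = &vi->sq[qid];

        if (napi_if_scheduled_mark_missed(&sq->napi))
                return 0;

        /* Schedule TX NAPI locally; the softirq runs when BH is re-enabled. */
        local_bh_disable();
        virtnet_vq_napi_schedule(&sq->napi, sq->vq);
        local_bh_enable();

        return 0;
}

That trades the cache-locality argument for much less code, which is why
some numbers would help.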
Thanks

> +
> +        put_cpu();
> +}
> +
> +int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
> +{
> +        struct virtnet_info *vi = netdev_priv(dev);
> +        struct virtnet_sq *sq;
> +
> +        if (!netif_running(dev))
> +                return -ENETDOWN;
> +
> +        if (qid >= vi->curr_queue_pairs)
> +                return -EINVAL;
> +
> +        sq = &vi->sq[qid];
> +
> +        if (napi_if_scheduled_mark_missed(&sq->napi))
> +                return 0;
> +
> +        virtnet_remote_raise_napi(sq);
> +
> +        return 0;
> +}
> +
>  static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct virtnet_rq *rq,
>                                      struct xsk_buff_pool *pool)
>  {
> @@ -240,6 +294,9 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
>
>          sq->xsk.hdr_dma_address = hdr_dma;
>
> +        INIT_CSD(&sq->xsk.csd, virtnet_remote_napi_schedule, sq);
> +        spin_lock_init(&sq->xsk.ipi_lock);
> +
>          return 0;
>
> err_sq:
> diff --git a/drivers/net/virtio/xsk.h b/drivers/net/virtio/xsk.h
> index 73ca8cd5308b..1bd19dcda649 100644
> --- a/drivers/net/virtio/xsk.h
> +++ b/drivers/net/virtio/xsk.h
> @@ -17,4 +17,5 @@ static inline void *virtnet_xsk_to_ptr(u32 len)
> int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp);
> bool virtnet_xsk_xmit(struct virtnet_sq *sq, struct xsk_buff_pool *pool,
>                        int budget);
> +int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag);
> #endif
> --
> 2.32.0.3.g01195cf9f
>