Message-Id: <20230803140441.53596-4-huangjie.albert@bytedance.com>
Date: Thu, 3 Aug 2023 22:04:29 +0800
From: "huangjie.albert" <huangjie.albert@...edance.com>
To: davem@...emloft.net, edumazet@...gle.com, kuba@...nel.org,
pabeni@...hat.com
Cc: "huangjie.albert" <huangjie.albert@...edance.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Björn Töpel <bjorn@...nel.org>,
Magnus Karlsson <magnus.karlsson@...el.com>,
Maciej Fijalkowski <maciej.fijalkowski@...el.com>,
Jonathan Lemon <jonathan.lemon@...il.com>,
Pavel Begunkov <asml.silence@...il.com>,
Kees Cook <keescook@...omium.org>,
Richard Gobert <richardbgobert@...il.com>,
Yunsheng Lin <linyunsheng@...wei.com>,
netdev@...r.kernel.org (open list:NETWORKING DRIVERS),
linux-kernel@...r.kernel.org (open list),
bpf@...r.kernel.org (open list:XDP (eXpress Data Path))
Subject: [RFC Optimizing veth xsk performance 03/10] veth: add support for send queue
In order to support native AF_XDP for veth, we need a per-queue send
queue (veth_sq) so that TX work can be driven from NAPI context.
An upcoming patch in this series will make use of it.
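For review context only: the kind of NAPI TX consumer this structure
enables might look roughly like the sketch below. The function name
veth_xsk_tx_poll() and the transmit step are illustrative and not part
of this patch; only the generic pool helpers (xsk_tx_peek_desc(),
xsk_buff_raw_get_data(), xsk_tx_release(), xsk_tx_completed()) are
existing kernel APIs.

	/* Illustrative sketch, not part of this patch: a minimal NAPI
	 * poll that drains TX descriptors from the xsk pool hung off
	 * a veth_sq.
	 */
	static int veth_xsk_tx_poll(struct napi_struct *napi, int budget)
	{
		struct veth_sq *sq = container_of(napi, struct veth_sq, xdp_napi);
		struct xsk_buff_pool *pool;
		struct xdp_desc desc;
		int done = 0;

		rcu_read_lock();
		pool = rcu_dereference(sq->xsk.pool);
		while (pool && done < budget && xsk_tx_peek_desc(pool, &desc)) {
			/* frame payload is xsk_buff_raw_get_data(pool, desc.addr),
			 * length is desc.len; hand it to the peer device here
			 */
			done++;
		}
		if (pool) {
			xsk_tx_release(pool);		/* release consumed TX ring entries */
			xsk_tx_completed(pool, done);	/* report completions to the cq */
		}
		rcu_read_unlock();

		if (done < budget)
			napi_complete_done(napi, done);

		return done;
	}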
Signed-off-by: huangjie.albert <huangjie.albert@...edance.com>
---
 drivers/net/veth.c | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)
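(Reviewer note, not part of this patch: once the sq array exists, a
later patch can attach one TX NAPI per queue with the standard helper.
veth_xsk_tx_poll refers to the illustrative poll sketch above.)

	/* hypothetical wiring in a later patch */
	for (i = 0; i < dev->real_num_tx_queues; i++)
		netif_napi_add_tx(dev, &priv->sq[i].xdp_napi,
				  veth_xsk_tx_poll);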
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index c2b431a7a017..63c3ebe4c5d0 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -56,6 +56,11 @@ struct veth_rq_stats {
 	struct u64_stats_sync	syncp;
 };
 
+struct veth_sq_stats {
+	struct veth_stats	vs;
+	struct u64_stats_sync	syncp;
+};
+
 struct veth_rq {
 	struct napi_struct	xdp_napi;
 	struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */
@@ -69,11 +74,25 @@ struct veth_rq {
 	struct page_pool	*page_pool;
 };
 
+struct veth_sq {
+	struct napi_struct	xdp_napi;
+	struct net_device	*dev;
+	struct xdp_mem_info	xdp_mem;
+	struct veth_sq_stats	stats;
+	u32			queue_index;
+	/* this is for xsk */
+	struct {
+		struct xsk_buff_pool __rcu *pool;
+		u32 last_cpu;
+	} xsk;
+};
+
 struct veth_priv {
 	struct net_device __rcu	*peer;
 	atomic64_t		dropped;
 	struct bpf_prog		*_xdp_prog;
 	struct veth_rq		*rq;
+	struct veth_sq		*sq;
 	unsigned int		requested_headroom;
 };
 
@@ -1495,6 +1514,17 @@ static int veth_alloc_queues(struct net_device *dev)
 		u64_stats_init(&priv->rq[i].stats.syncp);
 	}
 
+	priv->sq = kcalloc(dev->num_tx_queues, sizeof(*priv->sq), GFP_KERNEL);
+	if (!priv->sq) {
+		kfree(priv->rq);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		priv->sq[i].dev = dev;
+		u64_stats_init(&priv->sq[i].stats.syncp);
+	}
+
 	return 0;
 }
 
@@ -1503,6 +1533,7 @@ static void veth_free_queues(struct net_device *dev)
 	struct veth_priv *priv = netdev_priv(dev);
 
 	kfree(priv->rq);
+	kfree(priv->sq);
 }
 
 static int veth_dev_init(struct net_device *dev)
--
2.20.1