[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230807122238.85463-1-huangjie.albert@bytedance.com>
Date: Mon, 7 Aug 2023 20:22:38 +0800
From: Albert Huang <huangjie.albert@...edance.com>
To: unlisted-recipients:; (no To-header on input)
Cc: Albert Huang <huangjie.albert@...edance.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
netdev@...r.kernel.org (open list:NETWORKING DRIVERS),
linux-kernel@...r.kernel.org (open list),
bpf@...r.kernel.org (open list:XDP (eXpress Data Path))
Subject: [RFC v2 Optimizing veth xsk performance 3/9] veth: add support for send queue
In order to support native AF_XDP for veth, we need
to add support for a send queue for NAPI TX.
The upcoming patch will make use of it.
Signed-off-by: Albert Huang <huangjie.albert@...edance.com>
---
drivers/net/veth.c | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 77e12d52ca2b..25faba879505 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -56,6 +56,11 @@ struct veth_rq_stats {
struct u64_stats_sync syncp;
};
+struct veth_sq_stats {
+ struct veth_stats vs;
+ struct u64_stats_sync syncp;
+};
+
struct veth_rq {
struct napi_struct xdp_napi;
struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */
@@ -69,11 +74,25 @@ struct veth_rq {
struct page_pool *page_pool;
};
+struct veth_sq {
+ struct napi_struct xdp_napi;
+ struct net_device *dev;
+ struct xdp_mem_info xdp_mem;
+ struct veth_sq_stats stats;
+ u32 queue_index;
+ /* for xsk */
+ struct {
+ struct xsk_buff_pool __rcu *pool;
+ u32 last_cpu;
+ } xsk;
+};
+
struct veth_priv {
struct net_device __rcu *peer;
atomic64_t dropped;
struct bpf_prog *_xdp_prog;
struct veth_rq *rq;
+ struct veth_sq *sq;
unsigned int requested_headroom;
};
@@ -1495,6 +1514,15 @@ static int veth_alloc_queues(struct net_device *dev)
u64_stats_init(&priv->rq[i].stats.syncp);
}
+ priv->sq = kcalloc(dev->num_tx_queues, sizeof(*priv->sq), GFP_KERNEL);
+ if (!priv->sq)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ priv->sq[i].dev = dev;
+ u64_stats_init(&priv->sq[i].stats.syncp);
+ }
+
return 0;
}
@@ -1503,6 +1531,7 @@ static void veth_free_queues(struct net_device *dev)
struct veth_priv *priv = netdev_priv(dev);
kfree(priv->rq);
+ kfree(priv->sq);
}
static int veth_dev_init(struct net_device *dev)
--
2.20.1
Powered by blists - more mailing lists