Message-Id: <20201119083024.119566-9-bjorn.topel@gmail.com>
Date: Thu, 19 Nov 2020 09:30:22 +0100
From: Björn Töpel <bjorn.topel@...il.com>
To: netdev@...r.kernel.org, bpf@...r.kernel.org
Cc: Björn Töpel <bjorn.topel@...el.com>,
magnus.karlsson@...el.com, ast@...nel.org, daniel@...earbox.net,
maciej.fijalkowski@...el.com, sridhar.samudrala@...el.com,
jesse.brandeburg@...el.com, qi.z.zhang@...el.com, kuba@...nel.org,
edumazet@...gle.com, jonathan.lemon@...il.com, maximmi@...dia.com
Subject: [PATCH bpf-next v3 08/10] samples/bpf: use recvfrom() in xdpsock/l2fwd

From: Björn Töpel <bjorn.topel@...el.com>

Start using recvfrom() in the l2fwd scenario instead of poll(), which
is more expensive and needs additional knobs for busy-polling.

Signed-off-by: Björn Töpel <bjorn.topel@...el.com>
---
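Note for reviewers: a minimal sketch of the two wakeup idioms, assuming
the AF_XDP socket fd obtained via xsk_socket__fd(); the helper names
below are illustrative only, not the sample's exact code:

  #include <poll.h>
  #include <sys/socket.h>

  /* Before: poll() sleeps for up to timeout_ms and, once busy-polling
   * is enabled, needs additional per-socket knobs to behave well. */
  static void kick_with_poll(struct pollfd *fds, int nfds, int timeout_ms)
  {
          poll(fds, nfds, timeout_ms);
  }

  /* After: a zero-length, non-blocking recvfrom() on the AF_XDP socket
   * is enough to enter the kernel and trigger the driver's wakeup path,
   * without sleeping in the syscall. */
  static void kick_with_recvfrom(int xsk_fd)
  {
          recvfrom(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, NULL);
  }

With this change the l2fwd fast path no longer depends on opt_timeout;
poll() is only entered on the explicit opt_poll path.
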
samples/bpf/xdpsock_user.c | 25 +++++++++++--------------
1 file changed, 11 insertions(+), 14 deletions(-)

diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index f90111b95b2e..24aa7511c4c8 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -1098,8 +1098,7 @@ static void kick_tx(struct xsk_socket_info *xsk)
exit_with_error(errno);
}
-static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk,
- struct pollfd *fds)
+static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk)
{
struct xsk_umem_info *umem = xsk->umem;
u32 idx_cq = 0, idx_fq = 0;
@@ -1134,7 +1133,7 @@ static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk,
exit_with_error(-ret);
if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
xsk->app_stats.fill_fail_polls++;
- ret = poll(fds, num_socks, opt_timeout);
+ recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
}
ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
}
@@ -1331,19 +1330,19 @@ static void tx_only_all(void)
complete_tx_only_all();
}
-static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds)
+static void l2fwd(struct xsk_socket_info *xsk)
{
unsigned int rcvd, i;
u32 idx_rx = 0, idx_tx = 0;
int ret;
- complete_tx_l2fwd(xsk, fds);
+ complete_tx_l2fwd(xsk);
rcvd = xsk_ring_cons__peek(&xsk->rx, opt_batch_size, &idx_rx);
if (!rcvd) {
if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
xsk->app_stats.rx_empty_polls++;
- ret = poll(fds, num_socks, opt_timeout);
+ recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
}
return;
}
@@ -1353,7 +1352,7 @@ static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds)
while (ret != rcvd) {
if (ret < 0)
exit_with_error(-ret);
- complete_tx_l2fwd(xsk, fds);
+ complete_tx_l2fwd(xsk);
if (xsk_ring_prod__needs_wakeup(&xsk->tx)) {
xsk->app_stats.tx_wakeup_sendtos++;
kick_tx(xsk);
@@ -1388,22 +1387,20 @@ static void l2fwd_all(void)
struct pollfd fds[MAX_SOCKS] = {};
int i, ret;
- for (i = 0; i < num_socks; i++) {
- fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
- fds[i].events = POLLOUT | POLLIN;
- }
-
for (;;) {
if (opt_poll) {
- for (i = 0; i < num_socks; i++)
+ for (i = 0; i < num_socks; i++) {
+ fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
+ fds[i].events = POLLOUT | POLLIN;
xsks[i]->app_stats.opt_polls++;
+ }
ret = poll(fds, num_socks, opt_timeout);
if (ret <= 0)
continue;
}
for (i = 0; i < num_socks; i++)
- l2fwd(xsks[i], fds);
+ l2fwd(xsks[i]);
if (benchmark_done)
break;
--
2.27.0