[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20230529110608.597534-3-tariqt@nvidia.com>
Date: Mon, 29 May 2023 14:06:08 +0300
From: Tariq Toukan <tariqt@...dia.com>
To: Alexei Starovoitov <ast@...nel.org>, John Fastabend
<john.fastabend@...il.com>, Jakub Kicinski <kuba@...nel.org>
CC: Daniel Borkmann <daniel@...earbox.net>, Jesper Dangaard Brouer
<hawk@...nel.org>, <bpf@...r.kernel.org>, "David S. Miller"
<davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>,
<netdev@...r.kernel.org>, Gal Pressman <gal@...dia.com>, Nimrod Oren
<noren@...dia.com>, Tariq Toukan <tariqt@...dia.com>
Subject: [PATCH bpf-next 2/2] samples/bpf: fixup xdp_redirect_map tool to be able to support xdp multibuffer
From: Nimrod Oren <noren@...dia.com>
Expand the xdp multi-buffer support to the xdp_redirect_map tool.
Similar to what's done in commit
772251742262 ("samples/bpf: fixup some tools to be able to support xdp multibuffer")
and its fix commit
7a698edf954c ("samples/bpf: Fix MAC address swapping in xdp2_kern").
Signed-off-by: Nimrod Oren <noren@...dia.com>
Signed-off-by: Tariq Toukan <tariqt@...dia.com>
---
samples/bpf/xdp_redirect_map.bpf.c | 31 ++++++++++++++++++++++--------
1 file changed, 23 insertions(+), 8 deletions(-)
diff --git a/samples/bpf/xdp_redirect_map.bpf.c b/samples/bpf/xdp_redirect_map.bpf.c
index 8557c278df77..dd034fdff1a9 100644
--- a/samples/bpf/xdp_redirect_map.bpf.c
+++ b/samples/bpf/xdp_redirect_map.bpf.c
@@ -35,15 +35,20 @@ struct {
/* store egress interface mac address */
const volatile __u8 tx_mac_addr[ETH_ALEN];
+#define XDPBUFSIZE 64
static __always_inline int xdp_redirect_map(struct xdp_md *ctx, void *redirect_map)
{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
+ __u8 pkt[XDPBUFSIZE] = {};
+ void *data_end = &pkt[XDPBUFSIZE-1];
+ void *data = pkt;
u32 key = bpf_get_smp_processor_id();
struct ethhdr *eth = data;
struct datarec *rec;
u64 nh_off;
+ if (bpf_xdp_load_bytes(ctx, 0, pkt, sizeof(pkt)))
+ return XDP_DROP;
+
nh_off = sizeof(*eth);
if (data + nh_off > data_end)
return XDP_DROP;
@@ -53,30 +58,37 @@ static __always_inline int xdp_redirect_map(struct xdp_md *ctx, void *redirect_m
return XDP_PASS;
NO_TEAR_INC(rec->processed);
swap_src_dst_mac(data);
+ if (bpf_xdp_store_bytes(ctx, 0, pkt, sizeof(pkt)))
+ return XDP_DROP;
+
return bpf_redirect_map(redirect_map, 0, 0);
}
-SEC("xdp")
+SEC("xdp.frags")
int xdp_redirect_map_general(struct xdp_md *ctx)
{
return xdp_redirect_map(ctx, &tx_port_general);
}
-SEC("xdp")
+SEC("xdp.frags")
int xdp_redirect_map_native(struct xdp_md *ctx)
{
return xdp_redirect_map(ctx, &tx_port_native);
}
-SEC("xdp/devmap")
+SEC("xdp.frags/devmap")
int xdp_redirect_map_egress(struct xdp_md *ctx)
{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
+ __u8 pkt[XDPBUFSIZE] = {};
+ void *data_end = &pkt[XDPBUFSIZE-1];
+ void *data = pkt;
u8 *mac_addr = (u8 *) tx_mac_addr;
struct ethhdr *eth = data;
u64 nh_off;
+ if (bpf_xdp_load_bytes(ctx, 0, pkt, sizeof(pkt)))
+ return XDP_DROP;
+
nh_off = sizeof(*eth);
if (data + nh_off > data_end)
return XDP_DROP;
@@ -84,11 +96,14 @@ int xdp_redirect_map_egress(struct xdp_md *ctx)
barrier_var(mac_addr); /* prevent optimizing out memcpy */
__builtin_memcpy(eth->h_source, mac_addr, ETH_ALEN);
+ if (bpf_xdp_store_bytes(ctx, 0, pkt, sizeof(pkt)))
+ return XDP_DROP;
+
return XDP_PASS;
}
/* Redirect require an XDP bpf_prog loaded on the TX device */
-SEC("xdp")
+SEC("xdp.frags")
int xdp_redirect_dummy_prog(struct xdp_md *ctx)
{
return XDP_PASS;
--
2.34.1
Powered by blists - more mailing lists