Message-ID: <20250321002910.1343422-7-hramamurthy@google.com>
Date: Fri, 21 Mar 2025 00:29:10 +0000
From: Harshitha Ramamurthy <hramamurthy@...gle.com>
To: netdev@...r.kernel.org
Cc: jeroendb@...gle.com, hramamurthy@...gle.com, andrew+netdev@...n.ch,
davem@...emloft.net, edumazet@...gle.com, kuba@...nel.org, pabeni@...hat.com,
ast@...nel.org, daniel@...earbox.net, hawk@...nel.org,
john.fastabend@...il.com, pkaligineedi@...gle.com, willemb@...gle.com,
ziweixiao@...gle.com, joshwash@...gle.com, horms@...nel.org,
shailend@...gle.com, bcf@...gle.com, linux-kernel@...r.kernel.org,
bpf@...r.kernel.org
Subject: [PATCH net-next 6/6] gve: add XDP DROP and PASS support for DQ
From: Joshua Washington <joshwash@...gle.com>
Add support for running XDP programs on DQ, along with rudimentary
processing for XDP_DROP and XDP_PASS. These actions require very
limited driver functionality when it comes to processing an XDP
buffer, so currently, if the XDP action is anything other than
XDP_PASS, the packet is dropped and the stats are updated.
Reviewed-by: Willem de Bruijn <willemb@...gle.com>
Signed-off-by: Praveen Kaligineedi <pkaligineedi@...gle.com>
Signed-off-by: Joshua Washington <joshwash@...gle.com>
Signed-off-by: Harshitha Ramamurthy <hramamurthy@...gle.com>
---
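The Rx hunks below follow the common native-XDP pattern of building an
xdp_buff around the buffer, running the program, then reconciling the
buffer geometry afterwards. For reference, a minimal standalone sketch
of that pattern (my_rx_run_xdp() and its parameters are illustrative,
not gve code):

#include <linux/filter.h>	/* bpf_prog_run_xdp() */
#include <net/xdp.h>		/* xdp_buff helpers */

/*
 * Hypothetical helper, for illustration only: run @xprog over one Rx
 * buffer and report whether the packet should continue to the skb path.
 */
static bool my_rx_run_xdp(struct bpf_prog *xprog, struct xdp_rxq_info *rxq,
			  void *buf, u32 offset, u32 frame_sz,
			  u32 *headroom, u16 *buf_len)
{
	struct xdp_buff xdp;
	void *old_data;
	u32 act;

	/* frame_sz spans headroom + payload + tailroom of the buffer */
	xdp_init_buff(&xdp, frame_sz, rxq);
	/* hard_start = buffer base; *headroom = pad before the payload;
	 * no metadata has been set up yet, hence meta_valid == false
	 */
	xdp_prepare_buff(&xdp, buf + offset, *headroom, *buf_len, false);

	old_data = xdp.data;
	act = bpf_prog_run_xdp(xprog, &xdp);

	/*
	 * The program may have moved data/data_end with
	 * bpf_xdp_adjust_head()/bpf_xdp_adjust_tail(); fold the new
	 * geometry back into the ring's bookkeeping, exactly like the
	 * pad and buf_len fixups in the hunk below.
	 */
	*headroom += xdp.data - old_data;
	*buf_len = xdp.data_end - xdp.data;

	return act == XDP_PASS;
}
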
drivers/net/ethernet/google/gve/gve_rx_dqo.c | 52 ++++++++++++++++++++
1 file changed, 52 insertions(+)
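
To exercise just these two verdicts in driver (native) mode, a trivial
XDP program is enough. A sketch, assuming libbpf-style section naming;
the file and interface names are examples:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_drop_all(struct xdp_md *ctx)
{
	/* Every packet takes the drop branch added below,
	 * bumping rx->xdp_actions[XDP_DROP].
	 */
	return XDP_DROP;
}

char _license[] SEC("license") = "GPL";

Build and attach (xdpdrv forces the native path this patch enables,
rather than generic XDP):

  clang -O2 -g -target bpf -c xdp_drop.c -o xdp_drop.o
  ip link set dev eth0 xdpdrv obj xdp_drop.o sec xdp

The per-ring counters updated by this patch should then be visible via
ethtool -S (the exact stat names come from gve_ethtool.c).
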
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 2edf3c632cbd..deb869eb38e0 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -546,6 +546,29 @@ static int gve_rx_append_frags(struct napi_struct *napi,
return 0;
}
+static void gve_xdp_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp, struct bpf_prog *xprog,
+ int xdp_act,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ u64_stats_update_begin(&rx->statss);
+ switch (xdp_act) {
+ case XDP_ABORTED:
+ case XDP_DROP:
+ default:
+ rx->xdp_actions[xdp_act]++;
+ break;
+ case XDP_TX:
+ rx->xdp_tx_errors++;
+ break;
+ case XDP_REDIRECT:
+ rx->xdp_redirect_errors++;
+ break;
+ }
+ u64_stats_update_end(&rx->statss);
+ gve_free_buffer(rx, buf_state);
+}
+
/* Returns 0 if descriptor is completed successfully.
* Returns -EINVAL if descriptor is invalid.
* Returns -ENOMEM if data cannot be copied to skb.
@@ -560,6 +583,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
const bool hsplit = compl_desc->split_header;
struct gve_rx_buf_state_dqo *buf_state;
struct gve_priv *priv = rx->gve;
+ struct bpf_prog *xprog;
u16 buf_len;
u16 hdr_len;
@@ -633,6 +657,34 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
+ xprog = READ_ONCE(priv->xdp_prog);
+ if (xprog) {
+ struct xdp_buff xdp;
+ void *old_data;
+ int xdp_act;
+
+ xdp_init_buff(&xdp, buf_state->page_info.buf_size,
+ &rx->xdp_rxq);
+ xdp_prepare_buff(&xdp,
+ buf_state->page_info.page_address +
+ buf_state->page_info.page_offset,
+ buf_state->page_info.pad,
+ buf_len, false);
+ old_data = xdp.data;
+ xdp_act = bpf_prog_run_xdp(xprog, &xdp);
+ buf_state->page_info.pad += xdp.data - old_data;
+ buf_len = xdp.data_end - xdp.data;
+ if (xdp_act != XDP_PASS) {
+ gve_xdp_done_dqo(priv, rx, &xdp, xprog, xdp_act,
+ buf_state);
+ return 0;
+ }
+
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_actions[XDP_PASS]++;
+ u64_stats_update_end(&rx->statss);
+ }
+
if (eop && buf_len <= priv->rx_copybreak) {
rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len);
--
2.49.0.rc1.451.g8f38331e32-goog