Message-ID: <20251220235135.1078587-6-pvalerio@redhat.com>
Date: Sun, 21 Dec 2025 00:51:32 +0100
From: Paolo Valerio <pvalerio@...hat.com>
To: netdev@...r.kernel.org
Cc: Nicolas Ferre <nicolas.ferre@...rochip.com>,
Claudiu Beznea <claudiu.beznea@...on.dev>,
Andrew Lunn <andrew+netdev@...n.ch>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Lorenzo Bianconi <lorenzo@...nel.org>,
Théo Lebrun <theo.lebrun@...tlin.com>
Subject: [PATCH RFC net-next v2 5/8] cadence: macb: add XDP support for gem
Introduce basic XDP support for macb/gem, handling the XDP_PASS,
XDP_DROP, and XDP_REDIRECT verdicts.
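For reference, a minimal XDP program (not part of this patch; file and
function names below are purely illustrative) that can be used to
exercise the new path on a gem interface:

    /* xdp_pass.bpf.c - hand every frame back to the stack unchanged */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_pass_prog(struct xdp_md *ctx)
    {
            return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";

Once built (e.g. clang -O2 -target bpf -c xdp_pass.bpf.c -o xdp_pass.bpf.o),
it can be attached in native mode with something like:

    ip link set dev <iface> xdpdrv obj xdp_pass.bpf.o sec xdp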
Signed-off-by: Paolo Valerio <pvalerio@...hat.com>
---
drivers/net/ethernet/cadence/macb.h | 3 +
drivers/net/ethernet/cadence/macb_main.c | 184 ++++++++++++++++++++---
2 files changed, 169 insertions(+), 18 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 45c04157f153..815d50574267 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -16,6 +16,7 @@
#include <linux/workqueue.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>
+#include <linux/bpf_trace.h>
#define MACB_GREGS_NBR 16
#define MACB_GREGS_VERSION 2
@@ -1270,6 +1271,7 @@ struct macb_queue {
struct queue_stats stats;
struct page_pool *page_pool;
struct sk_buff *skb;
+ struct xdp_rxq_info xdp_rxq;
};
struct ethtool_rx_fs_item {
@@ -1369,6 +1371,7 @@ struct macb {
struct macb_pm_data pm_data;
const struct macb_usrio_config *usrio;
+ struct bpf_prog __rcu *prog;
};
#ifdef CONFIG_MACB_USE_HWSTAMP
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 582ceb728124..f767eb2e272e 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -5,6 +5,7 @@
* Copyright (C) 2004-2006 Atmel Corporation
*/
+#include <asm-generic/errno.h>
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/circ_buf.h>
#include <linux/clk-provider.h>
@@ -1249,9 +1250,19 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
return packets;
}
+static int gem_max_rx_data_size(int base_sz)
+{
+ return SKB_DATA_ALIGN(base_sz + ETH_HLEN + ETH_FCS_LEN);
+}
+
+static int gem_max_rx_buffer_size(int data_sz, struct macb *bp)
+{
+ return SKB_HEAD_ALIGN(data_sz + bp->rx_headroom);
+}
+
static int gem_total_rx_buffer_size(struct macb *bp)
{
- return SKB_HEAD_ALIGN(bp->rx_buffer_size + bp->rx_headroom);
+ return gem_max_rx_buffer_size(bp->rx_buffer_size, bp);
}
static int gem_rx_refill(struct macb_queue *queue, bool napi)
@@ -1336,10 +1347,59 @@ static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
*/
}
+static u32 gem_xdp_run(struct macb_queue *queue, void *buff_head,
+ unsigned int len)
+{
+ struct net_device *dev;
+ struct bpf_prog *prog;
+ struct xdp_buff xdp;
+
+ u32 act = XDP_PASS;
+
+ rcu_read_lock();
+
+ prog = rcu_dereference(queue->bp->prog);
+ if (!prog)
+ goto out;
+
+ xdp_init_buff(&xdp, gem_total_rx_buffer_size(queue->bp), &queue->xdp_rxq);
+ xdp_prepare_buff(&xdp, buff_head, queue->bp->rx_headroom, len, false);
+ xdp_buff_clear_frags_flag(&xdp);
+ dev = queue->bp->dev;
+
+ act = bpf_prog_run_xdp(prog, &xdp);
+ switch (act) {
+ case XDP_PASS:
+ goto out;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(dev, &xdp, prog))) {
+ act = XDP_DROP;
+ break;
+ }
+ goto out;
+ default:
+ bpf_warn_invalid_xdp_action(dev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ break;
+ }
+
+ page_pool_put_full_page(queue->page_pool,
+ virt_to_head_page(xdp.data), true);
+out:
+ rcu_read_unlock();
+
+ return act;
+}
+
static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
int budget)
{
struct macb *bp = queue->bp;
+ bool xdp_flush = false;
unsigned int len;
unsigned int entry;
struct macb_dma_desc *desc;
@@ -1352,9 +1412,10 @@ static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
while (count < budget) {
- u32 ctrl;
- dma_addr_t addr;
bool rxused, first_frame;
+ dma_addr_t addr;
+ u32 ctrl;
+ u32 ret;
entry = macb_rx_ring_wrap(bp, queue->rx_tail);
desc = macb_rx_desc(queue, entry);
@@ -1402,6 +1463,17 @@ static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
data_len = bp->rx_buffer_size;
}
+ if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF)))
+ goto skip_xdp;
+
+ ret = gem_xdp_run(queue, buff_head, len);
+ if (ret == XDP_REDIRECT)
+ xdp_flush = true;
+
+ if (ret != XDP_PASS)
+ goto next_frame;
+
+skip_xdp:
if (first_frame) {
queue->skb = napi_build_skb(buff_head, gem_total_rx_buffer_size(bp));
if (unlikely(!queue->skb)) {
@@ -1451,10 +1523,6 @@ static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
}
/* now everything is ready for receiving packet */
- queue->rx_buff[entry] = NULL;
-
- netdev_vdbg(bp->dev, "%s %u (len %u)\n", __func__, entry, data_len);
-
if (ctrl & MACB_BIT(RX_EOF)) {
bp->dev->stats.rx_packets++;
queue->stats.rx_packets++;
@@ -1476,6 +1544,8 @@ static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
queue->skb = NULL;
}
+next_frame:
+ queue->rx_buff[entry] = NULL;
continue;
free_frags:
@@ -1493,6 +1563,9 @@ static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
queue->rx_buff[entry] = NULL;
}
+ if (xdp_flush)
+ xdp_do_flush();
+
gem_rx_refill(queue, true);
return count;
@@ -2430,16 +2503,11 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void macb_init_rx_buffer_size(struct macb *bp, unsigned int mtu)
{
int overhead;
- size_t size;
if (!macb_is_gem(bp)) {
bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
} else {
- size = mtu + ETH_HLEN + ETH_FCS_LEN;
- if (!(bp->caps & MACB_CAPS_RSC))
- size += NET_IP_ALIGN;
-
- bp->rx_buffer_size = SKB_DATA_ALIGN(size);
+ bp->rx_buffer_size = gem_max_rx_data_size(mtu);
if (gem_total_rx_buffer_size(bp) > PAGE_SIZE) {
overhead = bp->rx_headroom +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -2484,6 +2552,8 @@ static void gem_free_rx_buffers(struct macb *bp)
kfree(queue->rx_buff);
queue->rx_buff = NULL;
+ if (xdp_rxq_info_is_reg(&queue->xdp_rxq))
+ xdp_rxq_info_unreg(&queue->xdp_rxq);
page_pool_destroy(queue->page_pool);
queue->page_pool = NULL;
}
@@ -2640,30 +2710,55 @@ static int macb_alloc_consistent(struct macb *bp)
return -ENOMEM;
}
-static int gem_create_page_pool(struct macb_queue *queue)
+static int gem_create_page_pool(struct macb_queue *queue, int qid)
{
struct page_pool_params pp_params = {
.order = 0,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.pool_size = queue->bp->rx_ring_size,
.nid = NUMA_NO_NODE,
- .dma_dir = DMA_FROM_DEVICE,
+ .dma_dir = rcu_access_pointer(queue->bp->prog)
+ ? DMA_BIDIRECTIONAL
+ : DMA_FROM_DEVICE,
.dev = &queue->bp->pdev->dev,
.napi = &queue->napi_rx,
.max_len = PAGE_SIZE,
};
struct page_pool *pool;
- int err = 0;
+ int err;
pool = page_pool_create(&pp_params);
if (IS_ERR(pool)) {
netdev_err(queue->bp->dev, "cannot create rx page pool\n");
err = PTR_ERR(pool);
- pool = NULL;
+ goto clear_pool;
}
queue->page_pool = pool;
+ err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->bp->dev, qid,
+ queue->napi_rx.napi_id);
+ if (err < 0) {
+ netdev_err(queue->bp->dev, "xdp: failed to register rxq info\n");
+ goto destroy_pool;
+ }
+
+ err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ queue->page_pool);
+ if (err) {
+ netdev_err(queue->bp->dev, "xdp: failed to register rxq memory model\n");
+ goto unreg_info;
+ }
+
+ return 0;
+
+unreg_info:
+ xdp_rxq_info_unreg(&queue->xdp_rxq);
+destroy_pool:
+ page_pool_destroy(pool);
+clear_pool:
+ queue->page_pool = NULL;
+
return err;
}
@@ -2705,7 +2800,7 @@ static int gem_init_rings(struct macb *bp, bool fail_early)
/* This is a hard failure, so the best we can do is try the
* next queue in case of HRESP error.
*/
- err = gem_create_page_pool(queue);
+ err = gem_create_page_pool(queue, q);
if (err) {
last_err = err;
if (fail_early)
@@ -3156,11 +3251,27 @@ static int macb_close(struct net_device *dev)
return 0;
}
+static bool gem_xdp_valid_mtu(struct macb *bp, int mtu)
+{
+ int max_frame_size;
+
+ max_frame_size = gem_max_rx_buffer_size(gem_max_rx_data_size(mtu), bp);
+
+ return max_frame_size <= PAGE_SIZE;
+}
+
static int macb_change_mtu(struct net_device *dev, int new_mtu)
{
+ struct macb *bp = netdev_priv(dev);
+
if (netif_running(dev))
return -EBUSY;
+ if (rcu_access_pointer(bp->prog) && !gem_xdp_valid_mtu(bp, new_mtu)) {
+ netdev_err(dev, "MTU %d too large for XDP", new_mtu);
+ return -EINVAL;
+ }
+
WRITE_ONCE(dev->mtu, new_mtu);
return 0;
@@ -3178,6 +3289,39 @@ static int macb_set_mac_addr(struct net_device *dev, void *addr)
return 0;
}
+static int gem_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+
+ if (prog && !gem_xdp_valid_mtu(bp, dev->mtu)) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
+ return -EOPNOTSUPP;
+ }
+
+ old_prog = rcu_replace_pointer(bp->prog, prog, lockdep_rtnl_is_held());
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+static int gem_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct macb *bp = netdev_priv(dev);
+
+ if (!macb_is_gem(bp))
+ return -EOPNOTSUPP;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return gem_xdp_setup(dev, xdp->prog, xdp->extack);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static void gem_update_stats(struct macb *bp)
{
struct macb_queue *queue;
@@ -4431,6 +4575,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_hwtstamp_set = macb_hwtstamp_set,
.ndo_hwtstamp_get = macb_hwtstamp_get,
.ndo_setup_tc = macb_setup_tc,
+ .ndo_bpf = gem_xdp,
};
/* Configure peripheral capabilities according to device tree
@@ -5734,6 +5879,9 @@ static int macb_probe(struct platform_device *pdev)
bp->rx_headroom = XDP_PACKET_HEADROOM;
if (!(bp->caps & MACB_CAPS_RSC))
bp->rx_headroom += NET_IP_ALIGN;
+
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT;
}
netif_carrier_off(dev);
--
2.52.0