Message-Id: <1536567880-15097-3-git-send-email-ilias.apalodimas@linaro.org>
Date: Mon, 10 Sep 2018 11:24:40 +0300
From: Ilias Apalodimas <ilias.apalodimas@...aro.org>
To: netdev@...r.kernel.org, jaswinder.singh@...aro.org
Cc: ard.biesheuvel@...aro.org, masami.hiramatsu@...aro.org,
arnd@...db.de, mykyta.iziumtsev@...aro.org, bjorn.topel@...el.com,
magnus.karlsson@...el.com,
Ilias Apalodimas <ilias.apalodimas@...aro.org>
Subject: [net-next, PATCH 2/2, v1] net: socionext: add AF_XDP support
Add basic AF_XDP support without zero-copy. The driver gains an .ndo_bpf
hook for attaching and querying a BPF program, and the Rx path now handles
the XDP_PASS, XDP_DROP, XDP_ABORTED, XDP_TX and XDP_REDIRECT actions.
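
For reference, a minimal sketch of an XDP program that could be used to
exercise this path is shown below; the file, section and function names are
illustrative and not part of this patch:

#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

/* Pass every frame up the stack; attaching any program is enough to
 * take the new netsec_run_xdp() path on Rx.
 */
SEC("xdp")
int xdp_pass_all(struct xdp_md *ctx)
{
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

Built with clang -target bpf, such an object can be attached with e.g.
"ip link set dev eth0 xdp obj xdp_pass.o sec xdp" (interface name
illustrative). AF_XDP itself can then be exercised with the in-tree
samples/bpf/xdpsock sample, which binds an AF_XDP socket and redirects
frames into it via XDP_REDIRECT.
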
Signed-off-by: Ilias Apalodimas <ilias.apalodimas@...aro.org>
---
drivers/net/ethernet/socionext/netsec.c | 211 ++++++++++++++++++++++++++++++--
1 file changed, 202 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 666fee2..7464ca6 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -9,6 +9,8 @@
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>
@@ -238,6 +240,11 @@
#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)
+#define NETSEC_XDP_PASS 0
+#define NETSEC_XDP_CONSUMED BIT(0)
+#define NETSEC_XDP_TX BIT(1)
+#define NETSEC_XDP_REDIR BIT(2)
+
enum ring_id {
NETSEC_RING_TX = 0,
NETSEC_RING_RX
@@ -256,11 +263,13 @@ struct netsec_desc_ring {
void *vaddr;
u16 pkt_cnt;
u16 head, tail;
+ struct xdp_rxq_info xdp_rxq;
};
struct netsec_priv {
struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
struct ethtool_coalesce et_coalesce;
+ struct bpf_prog *xdp_prog;
spinlock_t reglock; /* protect reg access */
struct napi_struct napi;
phy_interface_t phy_interface;
@@ -297,6 +306,8 @@ struct netsec_rx_pkt_info {
};
static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num);
+static u32 netsec_run_xdp(struct netsec_desc *desc, struct netsec_priv *priv,
+ struct bpf_prog *prog, u16 len);
static void *netsec_alloc_rx_data(struct netsec_priv *priv,
dma_addr_t *dma_addr, u16 *len);
@@ -613,13 +624,23 @@ static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
eop = (entry->attr >> NETSEC_TX_LAST) & 1;
- dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
- DMA_TO_DEVICE);
- if (eop) {
- pkts++;
+ if (desc->skb) {
+ dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+ DMA_TO_DEVICE);
+ }
+
+ if (!eop) {
+ *desc = (struct netsec_desc){};
+ continue;
+ }
+
+ if (!desc->skb) {
+ skb_free_frag(desc->addr);
+ } else {
bytes += desc->skb->len;
dev_kfree_skb(desc->skb);
}
+ pkts++;
*desc = (struct netsec_desc){};
}
dring->pkt_cnt -= budget;
@@ -659,8 +680,11 @@ static void nsetsec_adv_desc(u16 *idx)
static int netsec_process_rx(struct netsec_priv *priv, int budget)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
struct net_device *ndev = priv->ndev;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
+ u32 xdp_flush = 0;
+ u32 xdp_result;
int done = 0;
while (done < budget) {
@@ -707,6 +731,26 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
if (unlikely(!buf_addr))
break;
+		if (xdp_prog) {
+			xdp_result = netsec_run_xdp(desc, priv, xdp_prog,
+						    pkt_len);
+			if (xdp_result != NETSEC_XDP_PASS) {
+				xdp_flush |= xdp_result & NETSEC_XDP_REDIR;
+
+				dma_unmap_single_attrs(priv->dev,
+						       desc->dma_addr,
+						       desc->len, DMA_TO_DEVICE,
+						       DMA_ATTR_SKIP_CPU_SYNC);
+
+				desc->len = desc_len;
+				desc->dma_addr = dma_handle;
+				desc->addr = buf_addr;
+				netsec_rx_fill(priv, idx, 1);
+				nsetsec_adv_desc(&dring->tail);
+				continue;
+			}
+		}
+
skb = build_skb(desc->addr, desc->len);
if (unlikely(!skb)) {
dma_unmap_single(priv->dev, dma_handle, desc_len,
@@ -740,6 +784,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
nsetsec_adv_desc(&dring->tail);
}
+ if (xdp_flush & NETSEC_XDP_REDIR)
+ xdp_do_flush_map();
+
return done;
}
@@ -892,6 +939,9 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
if (!dring->vaddr || !dring->desc)
return;
+ if (xdp_rxq_info_is_reg(&dring->xdp_rxq))
+ xdp_rxq_info_unreg(&dring->xdp_rxq);
+
for (idx = 0; idx < DESC_NUM; idx++) {
desc = &dring->desc[idx];
if (!desc->addr)
@@ -994,7 +1044,7 @@ static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
static int netsec_setup_rx_dring(struct netsec_priv *priv)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
- int i;
+ int i, err;
for (i = 0; i < DESC_NUM; i++) {
struct netsec_desc *desc = &dring->desc[i];
@@ -1003,20 +1053,29 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
u16 len;
buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
- if (!buf) {
- netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+ if (!buf)
goto err_out;
- }
desc->dma_addr = dma_handle;
desc->addr = buf;
desc->len = len;
}
netsec_rx_fill(priv, 0, DESC_NUM);
+ err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0);
+ if (err)
+ goto err_out;
+
+ err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err) {
+ xdp_rxq_info_unreg(&dring->xdp_rxq);
+ goto err_out;
+ }
return 0;
err_out:
+ netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
return -ENOMEM;
}
@@ -1353,6 +1412,9 @@ static int netsec_netdev_stop(struct net_device *ndev)
napi_disable(&priv->napi);
+ if (priv->xdp_prog)
+ bpf_prog_put(priv->xdp_prog);
+
netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
netsec_stop_gmac(priv);
@@ -1420,6 +1482,136 @@ static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
return phy_mii_ioctl(ndev->phydev, ifr, cmd);
}
+static u32 netsec_xmit_xdp(struct netsec_priv *priv, struct xdp_buff *xdp,
+ struct netsec_desc *rx_desc)
+{
+ struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
+ struct netsec_tx_pkt_ctrl tx_ctrl = {};
+ struct netsec_desc tx_desc;
+ int filled;
+ u32 len;
+
+ len = xdp->data_end - xdp->data;
+
+ if (tx_ring->head >= tx_ring->tail)
+ filled = tx_ring->head - tx_ring->tail;
+ else
+ filled = tx_ring->head + DESC_NUM - tx_ring->tail;
+
+ if (DESC_NUM - filled <= 1)
+ return NETSEC_XDP_CONSUMED;
+
+ dma_sync_single_for_device(priv->dev, rx_desc->dma_addr, len,
+ DMA_TO_DEVICE);
+
+ tx_desc.dma_addr = rx_desc->dma_addr;
+ tx_desc.addr = xdp->data;
+ tx_desc.len = len;
+
+ netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, NULL);
+ netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */
+
+ return NETSEC_XDP_TX;
+}
+
+static u32 netsec_run_xdp(struct netsec_desc *desc, struct netsec_priv *priv,
+ struct bpf_prog *prog, u16 len)
+
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ struct xdp_buff xdp;
+ u32 ret = NETSEC_XDP_PASS;
+ int err;
+ u32 act;
+
+ xdp.data_hard_start = desc->addr;
+ xdp.data = desc->addr;
+ xdp_set_data_meta_invalid(&xdp);
+ xdp.data_end = xdp.data + len;
+ xdp.rxq = &dring->xdp_rxq;
+
+ rcu_read_lock();
+ act = bpf_prog_run_xdp(prog, &xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ ret = NETSEC_XDP_PASS;
+ break;
+ case XDP_TX:
+ ret = netsec_xmit_xdp(priv, &xdp, desc);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(priv->ndev, &xdp, prog);
+ if (!err) {
+ ret = NETSEC_XDP_REDIR;
+ } else {
+ ret = NETSEC_XDP_CONSUMED;
+ xdp_return_buff(&xdp);
+ }
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ /* fall through */
+ case XDP_ABORTED:
+ trace_xdp_exception(priv->ndev, prog, act);
+ /* fall through -- handle aborts by dropping packet */
+ case XDP_DROP:
+ ret = NETSEC_XDP_CONSUMED;
+ break;
+ }
+
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog)
+{
+ struct net_device *dev = priv->ndev;
+ struct bpf_prog *old_prog;
+
+ /* For now, support only standard MTU-sized frames */
+ if (prog && dev->mtu > 1500) {
+ netdev_warn(dev, "Jumbo frames not yet supported with XDP\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (netif_running(dev))
+ netsec_netdev_stop(dev);
+
+ /* Detach old prog, if any */
+ old_prog = xchg(&priv->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (priv->xdp_prog) {
+ /* Attach BPF program */
+ priv->xdp_prog = bpf_prog_add(priv->xdp_prog, 1);
+ if (IS_ERR(priv->xdp_prog))
+ return PTR_ERR(priv->xdp_prog);
+ }
+
+ if (netif_running(dev))
+ netsec_netdev_open(dev);
+
+ return 0;
+}
+
+static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp)
+{
+ struct netsec_priv *priv = netdev_priv(ndev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return netsec_xdp_setup(priv, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops netsec_netdev_ops = {
.ndo_init = netsec_netdev_init,
.ndo_uninit = netsec_netdev_uninit,
@@ -1430,6 +1622,7 @@ static const struct net_device_ops netsec_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = netsec_netdev_ioctl,
+ .ndo_bpf = netsec_xdp,
};
static int netsec_of_probe(struct platform_device *pdev,
--
2.7.4