Message-ID: <20190628221555.3009654-4-jonathan.lemon@gmail.com>
Date: Fri, 28 Jun 2019 15:15:55 -0700
From: Jonathan Lemon <jonathan.lemon@...il.com>
To: <netdev@...r.kernel.org>, <bjorn.topel@...el.com>,
<magnus.karlsson@...el.com>, <jakub.kicinski@...ronome.com>,
<jeffrey.t.kirsher@...el.com>
CC: <kernel-team@...com>
Subject: [PATCH 3/3 bpf-next] ixgbe: Support zero-copy XDP_TX on the RX path for AF_XDP sockets.

When the XDP program attached to a zero-copy AF_XDP socket returns
XDP_TX, queue the umem frame on the XDP TX ring.  Space on the recycle
stack is pre-allocated when the xsk is created.  (The extra space is
sized from tx_ring, since the xdp ring is not initialized yet.)
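
For reference, a minimal XDP program that exercises this path could
look like the following (illustrative only, not part of this patch;
the bpf_helpers.h include path is an assumption):

	/* Reflect every received frame back out the same port.
	 * On a queue backed by a zero-copy AF_XDP socket, the
	 * XDP_TX verdict now takes the umem TX path added below.
	 */
	#include <linux/bpf.h>
	#include "bpf_helpers.h"	/* for SEC() */

	SEC("xdp")
	int xdp_reflect(struct xdp_md *ctx)
	{
		return XDP_TX;
	}

	char _license[] SEC("license") = "GPL";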
Signed-off-by: Jonathan Lemon <jonathan.lemon@...il.com>
---
drivers/net/ethernet/intel/ixgbe/ixgbe.h | 1 +
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 74 +++++++++++++++++---
2 files changed, 67 insertions(+), 8 deletions(-)
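
For testing, a rough userspace sketch that opens a zero-copy AF_XDP
socket so XDP_TX verdicts hit this path (assumes libbpf's xsk helpers
from tools/lib/bpf; the function name and the "eth0"/queue 0 choice
are illustrative):

	#include <bpf/xsk.h>
	#include <linux/if_xdp.h>

	/* Ring structures must stay live for the socket's lifetime. */
	static struct xsk_ring_prod fill, tx;
	static struct xsk_ring_cons comp, rx;

	static struct xsk_socket *open_zc_socket(void *bufs, __u64 size)
	{
		struct xsk_socket_config cfg = {
			.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
			.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
			.bind_flags = XDP_ZEROCOPY,
		};
		struct xsk_umem *umem;
		struct xsk_socket *xsk;

		/* Error handling and umem buffer allocation elided. */
		if (xsk_umem__create(&umem, bufs, size, &fill, &comp, NULL))
			return NULL;
		if (xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, &cfg))
			return NULL;
		return xsk;
	}
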
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 39e73ad60352..aca33e4773f5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -150,6 +150,7 @@ enum ixgbe_tx_flags {
/* software defined flags */
IXGBE_TX_FLAGS_SW_VLAN = 0x80,
IXGBE_TX_FLAGS_FCOE = 0x100,
+ IXGBE_TX_FLAGS_ZC_FRAME = 0x200,
};
/* VLAN info */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 65feb16200ea..c7a661736ab8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -77,7 +77,8 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
qid >= netdev->real_num_tx_queues)
return -EINVAL;
- if (!xsk_umem_recycle_alloc(umem, adapter->rx_ring[0]->count))
+ if (!xsk_umem_recycle_alloc(umem, adapter->rx_ring[0]->count +
+ adapter->tx_ring[0]->count))
return -ENOMEM;
err = ixgbe_xsk_umem_dma_map(adapter, umem);
@@ -135,13 +136,70 @@ int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
ixgbe_xsk_umem_disable(adapter, qid);
}
+static int ixgbe_xmit_rcvd_zc(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *rx_ring,
+ struct xdp_buff *xdp)
+{
+ struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+ struct ixgbe_tx_buffer *tx_buffer;
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct xdp_frame *xdpf;
+ u32 len, cmd_type;
+ dma_addr_t dma;
+ u16 i;
+
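+	/* One descriptor is needed per frame; if the XDP ring is
+	 * full, consume (drop) the frame rather than wait.
+	 */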
+ if (unlikely(!ixgbe_desc_unused(ring)))
+ return IXGBE_XDP_CONSUMED;
+ xdpf = convert_to_xdp_frame_keep_zc(xdp);
+ if (unlikely(!xdpf))
+ return IXGBE_XDP_CONSUMED;
+ xdpf->handle = xdp->handle;
+ len = xdpf->len;
+
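+	/* The whole umem was DMA-mapped when the socket was enabled,
+	 * so this is only an address lookup, not a new mapping.
+	 */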
+ dma = xdp_umem_get_dma(rx_ring->xsk_umem, xdp->handle);
+
+ /* record the location of the first descriptor for this packet */
+ tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
+ tx_buffer->bytecount = len;
+ tx_buffer->gso_segs = 1;
+ tx_buffer->protocol = 0;
+ tx_buffer->xdpf = xdpf;
+ tx_buffer->tx_flags = IXGBE_TX_FLAGS_ZC_FRAME;
+
+ i = ring->next_to_use;
+ tx_desc = IXGBE_TX_DESC(ring, i);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ /* put descriptor type bits */
+ cmd_type = IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_DEXT |
+ IXGBE_ADVTXD_DCMD_IFCS;
+ cmd_type |= len | IXGBE_TXD_CMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.olinfo_status =
+ cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+ /* Avoid any potential race with xdp_xmit and cleanup */
+ smp_wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ i++;
+ if (i == ring->count)
+ i = 0;
+
+ tx_buffer->next_to_watch = tx_desc;
+ ring->next_to_use = i;
+
+ return IXGBE_XDP_TX;
+}
+
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring,
struct xdp_buff *xdp)
{
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
- struct xdp_frame *xdpf;
u32 act;
rcu_read_lock();
@@ -152,12 +210,7 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
case XDP_PASS:
break;
case XDP_TX:
- xdpf = convert_to_xdp_frame(xdp);
- if (unlikely(!xdpf)) {
- result = IXGBE_XDP_CONSUMED;
- break;
- }
- result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ result = ixgbe_xmit_rcvd_zc(adapter, rx_ring, xdp);
break;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
@@ -576,6 +629,11 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *tx_bi)
{
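+	/* ZC frames point into the RX umem: recycle the buffer
+	 * instead of unmapping and freeing an xdp_frame.
+	 */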
+ if (tx_bi->tx_flags & IXGBE_TX_FLAGS_ZC_FRAME) {
+ xsk_umem_recycle_addr(tx_ring->xsk_umem, tx_bi->xdpf->handle);
+ tx_bi->tx_flags = 0;
+ return;
+ }
xdp_return_frame(tx_bi->xdpf);
dma_unmap_single(tx_ring->dev,
dma_unmap_addr(tx_bi, dma),
--
2.17.1