Date:   Tue, 22 Oct 2019 16:39:07 +0100
From:   Charles McLachlan <cmclachlan@...arflare.com>
To:     <davem@...emloft.net>
CC:     <netdev@...r.kernel.org>, <linux-net-drivers@...arflare.com>,
        <brouer@...hat.com>
Subject: [PATCH net-next 5/6] sfc: handle XDP_TX outcomes of XDP eBPF
 programs.

Handle the XDP_TX action by transmitting the frame on the XDP TX queue for
the current CPU, and provide an ndo_xdp_xmit function that uses the same
per-CPU queue to send frames redirected to this device.

Signed-off-by: Charles McLachlan <cmclachlan@...arflare.com>
---
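A minimal XDP program (illustrative only, not part of this series) that
exercises the new XDP_TX path could look like the sketch below: it bounces
every received frame straight back out of the same interface, which with this
patch lands in efx_xdp_tx_buffers() via the XDP_TX case in efx_do_xdp(). A
real forwarding program would normally rewrite the Ethernet header (e.g. swap
the MAC addresses) before returning XDP_TX.

/* xdp_tx_bounce.c - illustrative sketch, not part of this patch.
 * Build:  clang -O2 -g -target bpf -c xdp_tx_bounce.c -o xdp_tx_bounce.o
 * Attach: ip link set dev <iface> xdp obj xdp_tx_bounce.o sec xdp
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_tx_bounce(struct xdp_md *ctx)
{
	/* XDP_TX asks the driver to retransmit the frame; the sfc driver now
	 * converts the buffer with convert_to_xdp_frame() and queues it on
	 * this CPU's XDP TX queue via efx_xdp_tx_buffers().
	 */
	return XDP_TX;
}

char _license[] SEC("license") = "GPL";
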
 drivers/net/ethernet/sfc/efx.c | 14 +++++++
 drivers/net/ethernet/sfc/efx.h |  3 ++
 drivers/net/ethernet/sfc/rx.c  | 15 +++++++-
 drivers/net/ethernet/sfc/tx.c  | 70 ++++++++++++++++++++++++++++++++++
 4 files changed, 101 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 186c055cd024..3b6c5f093b97 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -228,6 +228,8 @@ static void efx_start_all(struct efx_nic *efx);
 static void efx_stop_all(struct efx_nic *efx);
 static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
 static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+			u32 flags);
 
 #define EFX_ASSERT_RESET_SERIALISED(efx)		\
 	do {						\
@@ -2644,6 +2646,7 @@ static const struct net_device_ops efx_netdev_ops = {
 #endif
 	.ndo_udp_tunnel_add	= efx_udp_tunnel_add,
 	.ndo_udp_tunnel_del	= efx_udp_tunnel_del,
+	.ndo_xdp_xmit		= efx_xdp_xmit,
 	.ndo_bpf		= efx_xdp
 };
 
@@ -2685,6 +2688,17 @@ static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	}
 }
 
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+			u32 flags)
+{
+	struct efx_nic *efx = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
+}
+
 static void efx_update_name(struct efx_nic *efx)
 {
 	strcpy(efx->name, efx->net_dev->name);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 04fed7c06618..45c7ae4114ec 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -322,4 +322,7 @@ static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
 	return true;
 }
 
+int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+		       bool flush);
+
 #endif /* EFX_EFX_H */
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 4a23ffff8ac2..f5febec9acc4 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -652,6 +652,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
 	u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
 	struct efx_rx_queue *rx_queue;
 	struct bpf_prog *xdp_prog;
+	struct xdp_frame *xdpf;
 	struct xdp_buff xdp;
 	u32 xdp_act;
 	s16 offset;
@@ -712,7 +713,19 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
 		break;
 
 	case XDP_TX:
-		return -EOPNOTSUPP;
+		/* Buffer ownership passes to tx on success. */
+		xdpf = convert_to_xdp_frame(&xdp);
+		rc = efx_xdp_tx_buffers(efx, 1, &xdpf, true);
+		if (rc != 1) {
+			efx_free_rx_buffers(rx_queue, rx_buf, 1);
+			if (net_ratelimit())
+				netif_err(efx, rx_err, efx->net_dev,
+					  "XDP TX failed (%d)\n", rc);
+			channel->n_rx_xdp_bad_drops++;
+		} else {
+			channel->n_rx_xdp_tx++;
+		}
+		break;
 
 	case XDP_REDIRECT:
 		rc = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 7602d5506d4b..379052c19b16 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -599,6 +599,76 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	return NETDEV_TX_OK;
 }
 
+/* Transmit a packet from an XDP buffer
+ *
+ * Returns number of packets sent on success, error code otherwise.
+ * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC
+ * (for XDP redirect).
+ */
+int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+		       bool flush)
+{
+	struct efx_tx_buffer *tx_buffer;
+	struct efx_tx_queue *tx_queue;
+	struct xdp_frame *xdpf;
+	dma_addr_t dma_addr;
+	unsigned int len;
+	int space;
+	int cpu;
+	int i;
+
+	cpu = raw_smp_processor_id();
+
+	if (!efx->xdp_tx_queue_count ||
+	    unlikely(cpu >= efx->xdp_tx_queue_count))
+		return -EINVAL;
+
+	tx_queue = efx->xdp_tx_queues[cpu];
+	if (unlikely(!tx_queue))
+		return -EINVAL;
+
+	if (n && xdpfs) {
+		/* Check for available space. We should never need multiple
+		 * descriptors per frame.
+		 */
+		space = efx->txq_entries +
+			tx_queue->read_count - tx_queue->insert_count;
+		n = min(n, space);
+
+		for (i = 0; i < n; i++) {
+			xdpf = xdpfs[i];
+
+			/* We'll want a descriptor for this tx. */
+			prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));
+
+			len = xdpf->len;
+
+			/* Map for DMA. */
+			dma_addr = dma_map_single(&efx->pci_dev->dev,
+						  xdpf->data, len,
+						  DMA_TO_DEVICE);
+			if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
+				return -EIO;
+
+			/*  Create descriptor and set up for unmapping DMA. */
+			tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+			tx_buffer->xdpf = xdpf;
+			tx_buffer->flags = EFX_TX_BUF_XDP |
+					   EFX_TX_BUF_MAP_SINGLE;
+			tx_buffer->dma_offset = 0;
+			tx_buffer->unmap_len = len;
+		}
+	}
+
+	/* Pass to hardware. */
+	if (flush)
+		efx_nic_push_buffers(tx_queue);
+
+	tx_queue->tx_packets += n;
+
+	return n;
+}
+
 /* Remove packets from the TX queue
  *
  * This removes packets from the TX queue, up to and including the

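As the comment above efx_xdp_tx_buffers() notes, the function is also reached
from a different NIC's NAPI poll via XDP redirect: when an XDP program on
another device redirects a frame to the sfc netdev, the core hands it to the
device's .ndo_xdp_xmit, i.e. efx_xdp_xmit() above. A minimal sketch of such a
program follows (illustrative only, not part of this series; the target
ifindex is a placeholder).

/* xdp_redirect_to_sfc.c - illustrative sketch only.
 * Attach to some other device's XDP hook; SFC_IFINDEX is a hypothetical
 * placeholder for the ifindex of the sfc interface.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define SFC_IFINDEX 4	/* hypothetical ifindex of the sfc netdev */

SEC("xdp")
int xdp_redirect_to_sfc(struct xdp_md *ctx)
{
	/* bpf_redirect() returns XDP_REDIRECT; the core then calls the target
	 * device's ndo_xdp_xmit (efx_xdp_xmit() here), which in turn calls
	 * efx_xdp_tx_buffers() with flush set from the XDP_XMIT_FLUSH flag.
	 */
	return bpf_redirect(SFC_IFINDEX, 0);
}

char _license[] SEC("license") = "GPL";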