lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20190627220836.2572684-3-jonathan.lemon@gmail.com>
Date:   Thu, 27 Jun 2019 15:08:32 -0700
From:   Jonathan Lemon <jonathan.lemon@...il.com>
To:     <netdev@...r.kernel.org>, <bjorn.topel@...el.com>,
        <magnus.karlsson@...el.com>, <saeedm@...lanox.com>,
        <maximmi@...lanox.com>, <brouer@...hat.com>
CC:     <kernel-team@...com>
Subject: [PATCH 2/6 bpf-next] Clean up xsk reuseq API

The reuseq is actually a recycle stack, only accessed from the kernel side.
Also, the implementation details of the stack should belong to the umem
object, and not be exposed to the caller.

Clean up and rename for consistency in preparation for the next patch.

Signed-off-by: Jonathan Lemon <jonathan.lemon@...il.com>
---
 drivers/net/ethernet/intel/i40e/i40e_xsk.c    |  8 +--
 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c  |  8 +--
 .../ethernet/mellanox/mlx5/core/en/xsk/rx.c   |  2 +-
 .../ethernet/mellanox/mlx5/core/en/xsk/umem.c |  7 +--
 include/net/xdp_sock.h                        | 23 ++------
 net/xdp/xdp_umem.c                            |  2 +-
 net/xdp/xsk_queue.c                           | 56 +++++++------------
 net/xdp/xsk_queue.h                           |  2 +-
 8 files changed, 33 insertions(+), 75 deletions(-)

diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 3b84fca1c11d..7efe5905b0af 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -78,7 +78,6 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
 				u16 qid)
 {
 	struct net_device *netdev = vsi->netdev;
-	struct xdp_umem_fq_reuse *reuseq;
 	bool if_running;
 	int err;
 
@@ -92,12 +91,9 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
 	    qid >= netdev->real_num_tx_queues)
 		return -EINVAL;
 
-	reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
-	if (!reuseq)
+	if (!xsk_umem_recycle_alloc(umem, vsi->rx_rings[0]->count))
 		return -ENOMEM;
 
-	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
-
 	err = i40e_xsk_umem_dma_map(vsi, umem);
 	if (err)
 		return err;
@@ -811,7 +807,7 @@ void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
 		if (!rx_bi->addr)
 			continue;
 
-		xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle);
+		xsk_umem_recycle_addr(rx_ring->xsk_umem, rx_bi->handle);
 		rx_bi->addr = NULL;
 	}
 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index a9cab3271ac9..1e09fa7ffb90 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -67,7 +67,6 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
 				 u16 qid)
 {
 	struct net_device *netdev = adapter->netdev;
-	struct xdp_umem_fq_reuse *reuseq;
 	bool if_running;
 	int err;
 
@@ -78,12 +77,9 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
 	    qid >= netdev->real_num_tx_queues)
 		return -EINVAL;
 
-	reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count);
-	if (!reuseq)
+	if (!xsk_umem_recycle_alloc(umem, adapter->rx_ring[0]->count))
 		return -ENOMEM;
 
-	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
-
 	err = ixgbe_xsk_umem_dma_map(adapter, umem);
 	if (err)
 		return err;
@@ -554,7 +550,7 @@ void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
 	struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];
 
 	while (i != rx_ring->next_to_alloc) {
-		xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);
+		xsk_umem_recycle_addr(rx_ring->xsk_umem, bi->handle);
 		i++;
 		bi++;
 		if (i == rx_ring->count) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 85440b1c1c3f..8d24eaa660a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -44,7 +44,7 @@ int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
 
 static inline void mlx5e_xsk_recycle_frame(struct mlx5e_rq *rq, u64 handle)
 {
-	xsk_umem_fq_reuse(rq->umem, handle);
+	xsk_umem_recycle_addr(rq->umem, handle);
 }
 
 /* XSKRQ uses pages from UMEM, they must not be released. They are returned to
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c
index 4baaa5788320..6f75ac653697 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c
@@ -243,13 +243,8 @@ int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid)
 
 int mlx5e_xsk_resize_reuseq(struct xdp_umem *umem, u32 nentries)
 {
-	struct xdp_umem_fq_reuse *reuseq;
-
-	reuseq = xsk_reuseq_prepare(nentries);
-	if (unlikely(!reuseq))
+	if (!xsk_umem_recycle_alloc(umem, nentries))
 		return -ENOMEM;
-	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
-
 	return 0;
 }
 
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 39516d960636..7df7b417ac53 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -83,10 +83,7 @@ void xsk_umem_discard_addr(struct xdp_umem *umem);
 void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
 bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
 void xsk_umem_consume_tx_done(struct xdp_umem *umem);
-struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
-struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
-					  struct xdp_umem_fq_reuse *newq);
-void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
+bool xsk_umem_recycle_alloc(struct xdp_umem *umem, u32 nentries);
 struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
 
 static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
@@ -131,7 +128,7 @@ static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
 		rq->length--;
 }
 
-static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
+static inline void xsk_umem_recycle_addr(struct xdp_umem *umem, u64 addr)
 {
 	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
 
@@ -185,19 +182,9 @@ static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
 {
 }
 
-static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
-{
-	return NULL;
-}
-
-static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
-	struct xdp_umem *umem,
-	struct xdp_umem_fq_reuse *newq)
-{
-	return NULL;
-}
-static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
+static inline bool xsk_umem_recycle_alloc(struct xdp_umem *umem, u32 nentries)
 {
+	return false;
 }
 
 static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
@@ -230,7 +217,7 @@ static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
 {
 }
 
-static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
+static inline void xsk_umem_recycle_addr(struct xdp_umem *umem, u64 addr)
 {
 }
 
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 9c6de4f114f8..2150ba3fc744 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -208,7 +208,7 @@ static void xdp_umem_release(struct xdp_umem *umem)
 		umem->cq = NULL;
 	}
 
-	xsk_reuseq_destroy(umem);
+	xsk_umem_recycle_destroy(umem);
 
 	xdp_umem_unpin_pages(umem);
 
diff --git a/net/xdp/xsk_queue.c b/net/xdp/xsk_queue.c
index b66504592d9b..116b28622297 100644
--- a/net/xdp/xsk_queue.c
+++ b/net/xdp/xsk_queue.c
@@ -65,55 +65,39 @@ void xskq_destroy(struct xsk_queue *q)
 	kfree(q);
 }
 
-struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
+bool xsk_umem_recycle_alloc(struct xdp_umem *umem, u32 nentries)
 {
-	struct xdp_umem_fq_reuse *newq;
+	struct xdp_umem_fq_reuse *newq, *oldq;
 
 	/* Check for overflow */
 	if (nentries > (u32)roundup_pow_of_two(nentries))
-		return NULL;
+		return false;
 	nentries = roundup_pow_of_two(nentries);
+	oldq = umem->fq_reuse;
+	if (oldq && (oldq->length >= nentries ||
+		     oldq->nentries == nentries))
+		return true;
 
 	newq = kvmalloc(struct_size(newq, handles, nentries), GFP_KERNEL);
 	if (!newq)
-		return NULL;
-	memset(newq, 0, offsetof(typeof(*newq), handles));
-
-	newq->nentries = nentries;
-	return newq;
-}
-EXPORT_SYMBOL_GPL(xsk_reuseq_prepare);
-
-struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
-					  struct xdp_umem_fq_reuse *newq)
-{
-	struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;
-
-	if (!oldq) {
-		umem->fq_reuse = newq;
-		return NULL;
+		return false;
+	if (oldq) {
+		memcpy(newq->handles, oldq->handles,
+		       array_size(oldq->length, sizeof(u64)));
+		newq->length = oldq->length;
+		kvfree(oldq);
+	} else {
+		newq->length = 0;
 	}
-
-	if (newq->nentries < oldq->length)
-		return newq;
-
-	memcpy(newq->handles, oldq->handles,
-	       array_size(oldq->length, sizeof(u64)));
-	newq->length = oldq->length;
-
+	newq->nentries = nentries;
 	umem->fq_reuse = newq;
-	return oldq;
-}
-EXPORT_SYMBOL_GPL(xsk_reuseq_swap);
 
-void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
-{
-	kvfree(rq);
+	return true;
 }
-EXPORT_SYMBOL_GPL(xsk_reuseq_free);
+EXPORT_SYMBOL_GPL(xsk_umem_recycle_alloc);
 
-void xsk_reuseq_destroy(struct xdp_umem *umem)
+void xsk_umem_recycle_destroy(struct xdp_umem *umem)
 {
-	xsk_reuseq_free(umem->fq_reuse);
+	kvfree(umem->fq_reuse);
 	umem->fq_reuse = NULL;
 }
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 12b49784a6d5..c9db4e8ae80e 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -321,6 +321,6 @@ struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
 void xskq_destroy(struct xsk_queue *q_ops);
 
 /* Executed by the core when the entire UMEM gets freed */
-void xsk_reuseq_destroy(struct xdp_umem *umem);
+void xsk_umem_recycle_destroy(struct xdp_umem *umem);
 
 #endif /* _LINUX_XSK_QUEUE_H */
-- 
2.17.1

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ