Message-ID: <20210608160118.3767932-10-shayagr@amazon.com>
Date: Tue, 8 Jun 2021 19:01:17 +0300
From: Shay Agroskin <shayagr@...zon.com>
To: David Miller <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>, <netdev@...r.kernel.org>
CC: Shay Agroskin <shayagr@...zon.com>,
"Woodhouse, David" <dwmw@...zon.com>,
"Machulsky, Zorik" <zorik@...zon.com>,
"Matushevsky, Alexander" <matua@...zon.com>,
Saeed Bshara <saeedb@...zon.com>,
"Wilson, Matt" <msw@...zon.com>,
"Liguori, Anthony" <aliguori@...zon.com>,
"Bshara, Nafea" <nafea@...zon.com>,
"Belgazal, Netanel" <netanel@...zon.com>,
"Saidi, Ali" <alisaidi@...zon.com>,
"Herrenschmidt, Benjamin" <benh@...zon.com>,
"Kiyanovski, Arthur" <akiyano@...zon.com>,
"Dagan, Noam" <ndagan@...zon.com>
Subject: [Patch v1 net-next 09/10] net: ena: Use dev_alloc() in RX buffer allocation
Use dev_alloc() when allocating RX buffers instead of specifying the
allocation flags explicitly. This results in the same behaviour with less
code.

Also move the page allocation and its DMA mapping into a function. This
creates a logical block, which may make the code easier to understand.
Signed-off-by: Shay Agroskin <shayagr@...zon.com>
---
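Note on the flag equivalence: as far as I can tell from include/linux/skbuff.h,
dev_alloc_page() ends up calling the page allocator with the GFP_ATOMIC and
__GFP_COMP flags the driver used to pass explicitly, plus __GFP_NOWARN and
__GFP_MEMALLOC, which is why the behaviour stays the same. A paraphrased
sketch of the call chain (not the verbatim kernel source):

        static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
                                                     unsigned int order)
        {
                /* For device RX buffers: request a compound page and allow
                 * dipping into memory reserves, on top of the caller's flags.
                 */
                gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;

                /* NUMA_NO_NODE prefers the node the calling CPU runs on */
                return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
        }

        static inline struct page *dev_alloc_page(void)
        {
                /* order-0 page, atomic context, no warning on failure */
                return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, 0);
        }

With GFP_ATOMIC | __GFP_COMP supplied by the helper, the explicit gfp
argument to ena_alloc_rx_page() becomes redundant, which is why it is
dropped below.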
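The new ena_alloc_map_page() helper folds both failure paths (page allocation
and DMA mapping) into a single pointer return using the kernel's
ERR_PTR()/IS_ERR()/PTR_ERR() convention. A minimal, self-contained sketch of
that convention, using hypothetical foo_alloc()/foo_user() names rather than
the ENA code:

        #include <linux/err.h>
        #include <linux/slab.h>

        struct foo {
                int dummy;
        };

        static struct foo *foo_alloc(void)
        {
                struct foo *p = kzalloc(sizeof(*p), GFP_ATOMIC);

                if (!p)
                        return ERR_PTR(-ENOMEM); /* encode errno in the pointer */

                return p;
        }

        static int foo_user(void)
        {
                struct foo *p = foo_alloc();

                if (IS_ERR(p))             /* pointer holds an encoded errno */
                        return PTR_ERR(p); /* decode back to a negative errno */

                kfree(p);
                return 0;
        }

The caller below (ena_alloc_rx_buffer()) follows the same pattern: it checks
IS_ERR() once and propagates PTR_ERR() for either failure.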
drivers/net/ethernet/amazon/ena/ena_netdev.c | 58 ++++++++++++--------
1 file changed, 36 insertions(+), 22 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 37c839401c6c..261680aba33c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -975,8 +975,37 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
ena_free_rx_resources(adapter, i);
}
-static int ena_alloc_rx_page(struct ena_ring *rx_ring,
- struct ena_rx_buffer *rx_info, gfp_t gfp)
+static struct page *ena_alloc_map_page(struct ena_ring *rx_ring, dma_addr_t *dma)
+{
+ struct page *page;
+
+ /* dev_alloc_page() allocates the page on the NUMA node that the
+ * executing code is running on.
+ */
+ page = dev_alloc_page();
+ if (!page) {
+ ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
+ &rx_ring->syncp);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ /* To enable NIC-side port-mirroring, AKA SPAN port,
+ * we make the buffer readable from the nic as well
+ */
+ *dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) {
+ ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
+ &rx_ring->syncp);
+ __free_page(page);
+ return ERR_PTR(-EIO);
+ }
+
+ return page;
+}
+
+static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
+ struct ena_rx_buffer *rx_info)
{
int headroom = rx_ring->rx_headroom;
struct ena_com_buf *ena_buf;
@@ -991,25 +1020,11 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
if (unlikely(rx_info->page))
return 0;
- page = alloc_page(gfp);
- if (unlikely(!page)) {
- ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
- &rx_ring->syncp);
- return -ENOMEM;
- }
-
- /* To enable NIC-side port-mirroring, AKA SPAN port,
- * we make the buffer readable from the nic as well
- */
- dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
- ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
- &rx_ring->syncp);
+ /* Allocate a new page and map it for DMA */
+ page = ena_alloc_map_page(rx_ring, &dma);
+ if (unlikely(IS_ERR(page)))
+ return PTR_ERR(page);
- __free_page(page);
- return -EIO;
- }
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"Allocate page %p, rx_info %p\n", page, rx_info);
@@ -1065,8 +1080,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
rx_info = &rx_ring->rx_buffer_info[req_id];
- rc = ena_alloc_rx_page(rx_ring, rx_info,
- GFP_ATOMIC | __GFP_COMP);
+ rc = ena_alloc_rx_buffer(rx_ring, rx_info);
if (unlikely(rc < 0)) {
netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
"Failed to allocate buffer for rx queue %d\n",
--
2.25.1