Message-ID: <20240118192500.58665-3-shannon.nelson@amd.com>
Date: Thu, 18 Jan 2024 11:24:53 -0800
From: Shannon Nelson <shannon.nelson@....com>
To: <netdev@...r.kernel.org>, <davem@...emloft.net>, <kuba@...nel.org>,
<edumazet@...gle.com>, <pabeni@...hat.com>
CC: <brett.creeley@....com>, <drivers@...sando.io>, Shannon Nelson
<shannon.nelson@....com>
Subject: [PATCH RFC net-next 2/9] ionic: add helpers for accessing buffer info
These helpers clean up some of the code around DMA mapping
and other buffer references, and will be used in the next
few patches that add XDP support.
Signed-off-by: Shannon Nelson <shannon.nelson@....com>
---
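Note for reviewers: a minimal standalone sketch (not driver code) of the
address/size arithmetic the new helpers encapsulate. The types and constants
below are stand-ins for ionic_buf_info, IONIC_PAGE_SIZE and IONIC_MAX_BUF_LEN,
and the min_t() capping is open-coded so the snippet builds on its own:

	/*
	 * Standalone userspace sketch of the buf_info arithmetic wrapped by
	 * ionic_rx_buf_pa()/ionic_rx_buf_size(). All names here are stand-ins,
	 * not the driver's real types or constants.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define SKETCH_PAGE_SIZE   4096u   /* stand-in for IONIC_PAGE_SIZE */
	#define SKETCH_MAX_BUF_LEN 16384u  /* stand-in for IONIC_MAX_BUF_LEN */

	struct sketch_buf_info {
		uint64_t dma_addr;     /* DMA address of the mapped page */
		uint32_t page_offset;  /* offset of this buffer within the page */
	};

	/* device-visible address of the buffer: page DMA address plus offset */
	static uint64_t sketch_buf_pa(const struct sketch_buf_info *bi)
	{
		return bi->dma_addr + bi->page_offset;
	}

	/* usable length: capped by both the max buffer length and the page end */
	static uint32_t sketch_buf_size(const struct sketch_buf_info *bi)
	{
		uint32_t room = SKETCH_PAGE_SIZE - bi->page_offset;

		return room < SKETCH_MAX_BUF_LEN ? room : SKETCH_MAX_BUF_LEN;
	}

	int main(void)
	{
		struct sketch_buf_info bi = {
			.dma_addr = 0x100000,
			.page_offset = 2048,
		};

		printf("pa=0x%llx size=%u\n",
		       (unsigned long long)sketch_buf_pa(&bi),
		       (unsigned)sketch_buf_size(&bi));
		return 0;
	}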
.../net/ethernet/pensando/ionic/ionic_txrx.c | 37 ++++++++++++-------
1 file changed, 24 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 54cd96b035d6..19a7a8a8e1b3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -88,6 +88,21 @@ static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
return netdev_get_tx_queue(q->lif->netdev, q->index);
}
+static inline void *ionic_rx_buf_va(struct ionic_buf_info *buf_info)
+{
+ return page_address(buf_info->page) + buf_info->page_offset;
+}
+
+static inline dma_addr_t ionic_rx_buf_pa(struct ionic_buf_info *buf_info)
+{
+ return buf_info->dma_addr + buf_info->page_offset;
+}
+
+static inline unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info)
+{
+ return min_t(u32, IONIC_MAX_BUF_LEN, IONIC_PAGE_SIZE - buf_info->page_offset);
+}
+
static int ionic_rx_page_alloc(struct ionic_queue *q,
struct ionic_buf_info *buf_info)
{
@@ -207,12 +222,11 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
return NULL;
}
- frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
- IONIC_PAGE_SIZE - buf_info->page_offset));
+ frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
len -= frag_len;
dma_sync_single_for_cpu(dev,
- buf_info->dma_addr + buf_info->page_offset,
+ ionic_rx_buf_pa(buf_info),
frag_len, DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
@@ -262,10 +276,10 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
return NULL;
}
- dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
+ dma_sync_single_for_cpu(dev, ionic_rx_buf_pa(buf_info),
len, DMA_FROM_DEVICE);
- skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
- dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
+ skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info), len);
+ dma_sync_single_for_device(dev, ionic_rx_buf_pa(buf_info),
len, DMA_FROM_DEVICE);
skb_put(skb, len);
@@ -452,9 +466,8 @@ void ionic_rx_fill(struct ionic_queue *q)
}
/* fill main descriptor - buf[0] */
- desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
- frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
- IONIC_PAGE_SIZE - buf_info->page_offset));
+ desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info));
+ frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
desc->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
@@ -472,10 +485,8 @@ void ionic_rx_fill(struct ionic_queue *q)
}
}
- sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
- frag_len = min_t(u16, remain_len, min_t(u32, IONIC_MAX_BUF_LEN,
- IONIC_PAGE_SIZE -
- buf_info->page_offset));
+ sg_elem->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info));
+ frag_len = min_t(u16, remain_len, ionic_rx_buf_size(buf_info));
sg_elem->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
--
2.17.1