Message-ID: <20230928113509.GF24230@kernel.org>
Date: Thu, 28 Sep 2023 13:35:09 +0200
From: Simon Horman <horms@...nel.org>
To: Sieng-Piaw Liew <liew.s.piaw@...il.com>
Cc: chris.snook@...il.com, davem@...emloft.net, kuba@...nel.org,
pabeni@...hat.com, edumazet@...gle.com, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH net-next] net: atl1c: switch to napi_consume_skb()
+ Eric Dumazet
On Thu, Sep 21, 2023 at 08:56:23AM +0800, Sieng-Piaw Liew wrote:
> Switch to napi_consume_skb() to take advantage of bulk free, and skb
> reuse through skb cache in conjunction with napi_build_skb().
>
> When parameter 'budget' = 0, indicating non-NAPI context,
> dev_consume_skb_any() is called internally.
>
> Signed-off-by: Sieng-Piaw Liew <liew.s.piaw@...il.com>
Reviewed-by: Simon Horman <horms@...nel.org>
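
For anyone less familiar with the API, the budget semantics described in the
commit message work roughly as sketched below. This is only a simplified
illustration, not the actual net/core/skbuff.c implementation (which also
manages the per-CPU skb cache for the bulk-free path), and
napi_consume_skb_sketch is just an illustrative name:

	/* Sketch of the budget handling described above (assumption:
	 * simplified from the real napi_consume_skb() behaviour).
	 */
	static void napi_consume_skb_sketch(struct sk_buff *skb, int budget)
	{
		if (unlikely(!budget)) {
			/* budget == 0: caller is not in NAPI context
			 * (e.g. the atl1c ring-cleanup paths below), so
			 * fall back to a free that is safe in any context.
			 */
			dev_consume_skb_any(skb);
			return;
		}

		/* budget != 0: NAPI poll context; the skb can be cached
		 * per-CPU and freed in bulk, or reused via napi_build_skb().
		 */
	}
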
> ---
> drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 13 +++++++------
> 1 file changed, 7 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
> index 74b78164cf74..46cdc32b4e31 100644
> --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
> +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
> @@ -842,7 +842,8 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
> }
>
> static inline void atl1c_clean_buffer(struct pci_dev *pdev,
> - struct atl1c_buffer *buffer_info)
> + struct atl1c_buffer *buffer_info,
> + int budget)
> {
> u16 pci_driection;
> if (buffer_info->flags & ATL1C_BUFFER_FREE)
> @@ -861,7 +862,7 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
> buffer_info->length, pci_driection);
> }
> if (buffer_info->skb)
> - dev_consume_skb_any(buffer_info->skb);
> + napi_consume_skb(buffer_info->skb, budget);
> buffer_info->dma = 0;
> buffer_info->skb = NULL;
> ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
> @@ -882,7 +883,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
> ring_count = tpd_ring->count;
> for (index = 0; index < ring_count; index++) {
> buffer_info = &tpd_ring->buffer_info[index];
> - atl1c_clean_buffer(pdev, buffer_info);
> + atl1c_clean_buffer(pdev, buffer_info, 0);
> }
>
> netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue));
> @@ -909,7 +910,7 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter, u32 queue)
>
> for (j = 0; j < rfd_ring->count; j++) {
> buffer_info = &rfd_ring->buffer_info[j];
> - atl1c_clean_buffer(pdev, buffer_info);
> + atl1c_clean_buffer(pdev, buffer_info, 0);
> }
> /* zero out the descriptor ring */
> memset(rfd_ring->desc, 0, rfd_ring->size);
> @@ -1607,7 +1608,7 @@ static int atl1c_clean_tx(struct napi_struct *napi, int budget)
> total_bytes += buffer_info->skb->len;
> total_packets++;
> }
> - atl1c_clean_buffer(pdev, buffer_info);
> + atl1c_clean_buffer(pdev, buffer_info, budget);
> if (++next_to_clean == tpd_ring->count)
> next_to_clean = 0;
> atomic_set(&tpd_ring->next_to_clean, next_to_clean);
> @@ -2151,7 +2152,7 @@ static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
> while (index != tpd_ring->next_to_use) {
> tpd = ATL1C_TPD_DESC(tpd_ring, index);
> buffer_info = &tpd_ring->buffer_info[index];
> - atl1c_clean_buffer(adpt->pdev, buffer_info);
> + atl1c_clean_buffer(adpt->pdev, buffer_info, 0);
> memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
> if (++index == tpd_ring->count)
> index = 0;
> --
> 2.34.1
>
>