Message-ID: <Zqo66ZPaD2GtLZwg@LQ3V64L9R2>
Date: Wed, 31 Jul 2024 14:23:53 +0100
From: Joe Damato <jdamato@...tly.com>
To: Jijie Shao <shaojijie@...wei.com>
Cc: yisen.zhuang@...wei.com, salil.mehta@...wei.com, davem@...emloft.net,
edumazet@...gle.com, kuba@...nel.org, pabeni@...hat.com,
horms@...nel.org, shenjian15@...wei.com, wangpeiyang1@...wei.com,
liuyonglong@...wei.com, sudongming1@...wei.com,
xujunsheng@...wei.com, shiyongbang@...wei.com,
netdev@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [RFC PATCH net-next 07/10] net: hibmcge: Implement rx_poll
function to receive packets
On Wed, Jul 31, 2024 at 05:42:42PM +0800, Jijie Shao wrote:
> Implement rx_poll function to read the rx descriptor after
> receiving the rx interrupt. Adjust the skb based on the
> descriptor to complete the reception of the packet.
>
> Signed-off-by: Jijie Shao <shaojijie@...wei.com>
> ---
> .../ethernet/hisilicon/hibmcge/hbg_common.h | 5 +
> .../net/ethernet/hisilicon/hibmcge/hbg_hw.c | 10 ++
> .../net/ethernet/hisilicon/hibmcge/hbg_hw.h | 1 +
> .../net/ethernet/hisilicon/hibmcge/hbg_irq.c | 9 +-
> .../net/ethernet/hisilicon/hibmcge/hbg_main.c | 2 +
> .../net/ethernet/hisilicon/hibmcge/hbg_reg.h | 2 +
> .../hisilicon/hibmcge/hbg_reg_union.h | 65 ++++++++
> .../net/ethernet/hisilicon/hibmcge/hbg_txrx.c | 157 +++++++++++++++++-
> 8 files changed, 248 insertions(+), 3 deletions(-)
> diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
> index 8efeea9b0c26..bb5f8321da8a 100644
> --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
> +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
> @@ -36,6 +36,7 @@ static int hbg_net_open(struct net_device *dev)
> return 0;
>
> netif_carrier_off(dev);
> + napi_enable(&priv->rx_ring.napi);
> napi_enable(&priv->tx_ring.napi);
> hbg_enable_intr(priv, true);
> hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
In the future, it might be good to consider using:
- netif_napi_set_irq
- netif_queue_set_napi
to link NAPIs with IRQs and queues.
For an example, see commit 64b62146ba9e ("net/mlx4: link NAPI instances
to queues and IRQs").
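A minimal sketch of the wiring (untested; "rx_irq" and the single RX
queue at index 0 are assumptions about this driver, not something the
patch shows):

	/* record which IRQ services this NAPI instance */
	netif_napi_set_irq(&priv->rx_ring.napi, rx_irq);

	/* with rtnl_lock held: associate RX queue 0 with this NAPI */
	netif_queue_set_napi(priv->netdev, 0, NETDEV_QUEUE_TYPE_RX,
			     &priv->rx_ring.napi);

That makes the NAPI <-> IRQ <-> queue relationships visible to user
space via the netdev netlink API.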
[...]
> +static int hbg_rx_fill_buffers(struct hbg_priv *priv)
> +{
> + struct hbg_ring *ring = &priv->rx_ring;
> + int ret;
> +
> + while (!(hbg_fifo_is_full(priv, ring->dir) ||
> + hbg_queue_is_full(ring->ntc, ring->ntu, ring))) {
> + ret = hbg_rx_fill_one_buffer(priv);
> + if (ret)
> + return ret;
> + }
> +
> + return 0;
> +}
> +
> +static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
> + struct hbg_buffer *buffer)
> +{
> + struct hbg_rx_desc *rx_desc;
> +
> + /* make sure HW write desc complete */
> + dma_rmb();
> +
> + dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma,
> + buffer->skb_len, DMA_FROM_DEVICE);
> +
> + rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
> + return rx_desc->len != 0;
> +}
Have you looked into using the page pool to simplify some of the
logic above?
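Roughly (an untested sketch; HBG_RING_SIZE, buf_len and the
ring->page_pool member are made up for illustration):

	struct page_pool_params pp_params = {
		.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order     = 0,
		.pool_size = HBG_RING_SIZE,
		.nid       = dev_to_node(&priv->pdev->dev),
		.dev       = &priv->pdev->dev,
		.napi      = &ring->napi,
		.dma_dir   = DMA_FROM_DEVICE,
		.max_len   = buf_len,
		.offset    = 0,
	};

	ring->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(ring->page_pool))
		return PTR_ERR(ring->page_pool);

With PP_FLAG_DMA_MAP / PP_FLAG_DMA_SYNC_DEV the pool owns the DMA
mapping and the sync-for-device on buffer recycle, which would let a
lot of the hand-rolled map/sync code in this file go away.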
> +static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
> +{
> + struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
> + struct hbg_priv *priv = ring->priv;
> + struct hbg_rx_desc *rx_desc;
> + struct hbg_buffer *buffer;
> + u32 packet_done = 0;
> +
> + if (unlikely(!hbg_nic_is_open(priv))) {
> + napi_complete(napi);
> + return 0;
> + }
> +
> + while (packet_done < budget) {
> + if (unlikely(hbg_queue_is_empty(ring->ntc, ring->ntu)))
> + break;
> +
> + buffer = &ring->queue[ring->ntc];
> + if (unlikely(!buffer->skb))
> + goto next_buffer;
> +
> + if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
> + break;
> +
> + hbg_dma_unmap(buffer);
> +
> + rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
> + skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
> + skb_put(buffer->skb, rx_desc->len);
> + buffer->skb->protocol = eth_type_trans(buffer->skb, priv->netdev);
> +
> + priv->netdev->stats.rx_bytes += rx_desc->len;
> + priv->netdev->stats.rx_packets++;
> + netif_receive_skb(buffer->skb);
Any reason why not napi_gro_receive?
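i.e.:

	napi_gro_receive(napi, buffer->skb);

so GRO gets a chance to coalesce before the skb goes up the stack.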
> + buffer->skb = NULL;
> + hbg_rx_fill_one_buffer(priv);
> +
> +next_buffer:
> + hbg_queue_move_next(ntc, ring);
> + packet_done++;
> + }
> +
> + hbg_rx_fill_buffers(priv);
> + if (packet_done >= budget)
> + return packet_done;
> +
> + napi_complete(napi);
Maybe:

	if (napi_complete_done(napi, packet_done))
		hbg_irq_enable(priv, HBG_IRQ_RX, true);
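so that the interrupt is only unmasked when NAPI is actually done;
napi_complete_done() can return false (for example when busy polling
still has the NAPI claimed), and in that case the IRQ should stay
masked.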
> + hbg_irq_enable(priv, HBG_IRQ_RX, true);
> +
> + return packet_done;
> +}
[...]