Message-ID: <Z73pMXNsYprCcbmk@LQ3V64L9R2>
Date: Tue, 25 Feb 2025 11:00:49 -0500
From: Joe Damato <jdamato@...tly.com>
To: Gur Stavi <gur.stavi@...wei.com>
Cc: Fan Gong <gongfan1@...wei.com>, netdev@...r.kernel.org,
linux-kernel@...r.kernel.org,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
Simon Horman <horms@...nel.org>,
Andrew Lunn <andrew+netdev@...n.ch>, linux-doc@...r.kernel.org,
Jonathan Corbet <corbet@....net>,
Bjorn Helgaas <helgaas@...nel.org>,
Cai Huoqing <cai.huoqing@...ux.dev>, luosifu <luosifu@...wei.com>,
Xin Guo <guoxin09@...wei.com>,
Shen Chenyang <shenchenyang1@...ilicon.com>,
Zhou Shuai <zhoushuai28@...wei.com>, Wu Like <wulike1@...wei.com>,
Shi Jing <shijing34@...wei.com>,
Meny Yossefi <meny.yossefi@...wei.com>,
Suman Ghosh <sumang@...vell.com>,
Przemek Kitszel <przemyslaw.kitszel@...el.com>
Subject: Re: [PATCH net-next v06 1/1] hinic3: module initialization and tx/rx
logic
On Tue, Feb 25, 2025 at 04:53:30PM +0200, Gur Stavi wrote:
> From: Fan Gong <gongfan1@...wei.com>
>
> This is [1/3] part of hinic3 Ethernet driver initial submission.
> With this patch hinic3 is a valid kernel module but non-functional
> driver.
IMHO, there's a huge amount of code here, which makes reviewing it
pretty difficult.

Is there no way to split this into multiple smaller patches? I am
sure this was asked and answered in a previous thread that I missed.
I took a quick pass over the code, but probably missed many things
due to the large amount of code in a single patch.
[...]
> +static void init_intr_coal_param(struct net_device *netdev)
> +{
> + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
> + struct hinic3_intr_coal_info *info;
> + u16 i;
> +
> + for (i = 0; i < nic_dev->max_qps; i++) {
> + info = &nic_dev->intr_coalesce[i];
> + info->pending_limt = HINIC3_DEAULT_TXRX_MSIX_PENDING_LIMIT;
> + info->coalesce_timer_cfg = HINIC3_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG;
> + info->resend_timer_cfg = HINIC3_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG;
> + }
> +}
> +
> +static int hinic3_init_intr_coalesce(struct net_device *netdev)
> +{
> + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
> + struct hinic3_hwdev *hwdev = nic_dev->hwdev;
> + u64 size;
> +
> + size = sizeof(*nic_dev->intr_coalesce) * nic_dev->max_qps;
> + if (!size) {
> + dev_err(hwdev->dev, "Cannot allocate zero size intr coalesce\n");
> + return -EINVAL;
> + }
> + nic_dev->intr_coalesce = kzalloc(size, GFP_KERNEL);
> + if (!nic_dev->intr_coalesce)
> + return -ENOMEM;
> +
> + init_intr_coal_param(netdev);
> + return 0;
> +}
> +
> +static void hinic3_free_intr_coalesce(struct net_device *netdev)
> +{
> + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
> +
> + kfree(nic_dev->intr_coalesce);
> +}
Do you need the IRQ coalescing code in this version of the patch? It
looks like hinic3_alloc_rxqs is unimplemented... so it's a bit
confusing to see code for IRQ coalescing but none for queue
allocation ?
> diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c
> new file mode 100644
> index 000000000000..4a166c13eb38
> --- /dev/null
> +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c
> @@ -0,0 +1,24 @@
> +// SPDX-License-Identifier: GPL-2.0
> +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
> +
> +#include "hinic3_hwdev.h"
> +#include "hinic3_hwif.h"
> +#include "hinic3_nic_cfg.h"
> +#include "hinic3_nic_dev.h"
> +#include "hinic3_rss.h"
> +
> +void hinic3_clear_rss_config(struct net_device *netdev)
> +{
> + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
> +
> + kfree(nic_dev->rss_hkey);
> + nic_dev->rss_hkey = NULL;
> +
> + kfree(nic_dev->rss_indir);
> + nic_dev->rss_indir = NULL;
> +}
Do you need the above code in hinic3_clear_rss_config?
I probably missed it but hinic3_try_to_enable_rss is empty, so I'm
not sure why you'd need to implement the de-allocation of the
rss_hkey and rss_indir in this patch ?
> +static void hinic3_reuse_rx_page(struct hinic3_rxq *rxq,
> + struct hinic3_rx_info *old_rx_info)
> +{
> + struct hinic3_rx_info *new_rx_info;
> + u16 nta = rxq->next_to_alloc;
> +
> + new_rx_info = &rxq->rx_info[nta];
> +
> + /* update, and store next to alloc */
> + nta++;
> + rxq->next_to_alloc = (nta < rxq->q_depth) ? nta : 0;
> +
> + new_rx_info->page = old_rx_info->page;
> + new_rx_info->page_offset = old_rx_info->page_offset;
> + new_rx_info->buf_dma_addr = old_rx_info->buf_dma_addr;
> +
> + /* sync the buffer for use by the device */
> + dma_sync_single_range_for_device(rxq->dev, new_rx_info->buf_dma_addr,
> + new_rx_info->page_offset,
> + rxq->buf_len,
> + DMA_FROM_DEVICE);
> +}
Are you planning to use the page pool in future revisions to
simplify the code ?
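
Using page_pool would let you drop most of the manual reuse logic in
hinic3_reuse_rx_page(). Purely as an untested sketch of the setup side
(the rxq->page_pool field and where this lives are made up here, and
the helpers come from <net/page_pool/helpers.h> IIRC):

	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= rxq->q_depth,
		.nid		= dev_to_node(rxq->dev),
		.dev		= rxq->dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.offset		= 0,
		.max_len	= rxq->buf_len,
	};

	rxq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rxq->page_pool))
		return PTR_ERR(rxq->page_pool);

and the refill path then becomes roughly:

	/* the pool handles DMA mapping and recycling of the pages */
	page = page_pool_dev_alloc_pages(rxq->page_pool);
	if (unlikely(!page))
		break;
	dma_addr = page_pool_get_dma_addr(page);
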
> +static void hinic3_add_rx_frag(struct hinic3_rxq *rxq,
> + struct hinic3_rx_info *rx_info,
> + struct sk_buff *skb, u32 size)
> +{
> + struct page *page;
> + u8 *va;
> +
> + page = rx_info->page;
> + va = (u8 *)page_address(page) + rx_info->page_offset;
> + prefetch(va);
net_prefetch ?
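
i.e. drop the open-coded prefetch(); as I recall the helper in
include/linux/netdevice.h is roughly:

static inline void net_prefetch(void *p)
{
	prefetch(p);
#if L1_CACHE_BYTES < 128
	prefetch((u8 *)p + L1_CACHE_BYTES);
#endif
}

so on machines with 64-byte cachelines it also pulls in the second
cacheline of the packet header.
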
> +
> + dma_sync_single_range_for_cpu(rxq->dev,
> + rx_info->buf_dma_addr,
> + rx_info->page_offset,
> + rxq->buf_len,
> + DMA_FROM_DEVICE);
> +
> + if (size <= HINIC3_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
> + memcpy(__skb_put(skb, size), va,
> + ALIGN(size, sizeof(long)));
> +
> + /* page is not reserved, we can reuse buffer as-is */
> + if (likely(page_to_nid(page) == numa_node_id()))
> + goto reuse_rx_page;
> +
> + /* this page cannot be reused so discard it */
> + put_page(page);
> + goto err_reuse_buffer;
> + }
> +
> + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
> + rx_info->page_offset, size, rxq->buf_len);
> +
> + /* avoid re-using remote pages */
> + if (unlikely(page_to_nid(page) != numa_node_id()))
> + goto err_reuse_buffer;
> +
> + /* if we are the only owner of the page we can reuse it */
> + if (unlikely(page_count(page) != 1))
> + goto err_reuse_buffer;
Are you planning to use the page pool in future revisions to
simplify the code ?
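
With page_pool the NUMA and refcount checks above are handled by the
pool itself, so (untested sketch, reusing your current field names)
the frag path could collapse to roughly:

	dma_sync_single_range_for_cpu(rxq->dev, rx_info->buf_dma_addr,
				      rx_info->page_offset, size,
				      DMA_FROM_DEVICE);

	/* tell the stack to return the page to the pool on skb free */
	skb_mark_for_recycle(skb);
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
			rx_info->page_offset, size, rxq->buf_len);

with pages you decide to drop going back via page_pool_put_full_page().
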
> +static struct sk_buff *hinic3_fetch_rx_buffer(struct hinic3_rxq *rxq,
> + u32 pkt_len)
> +{
> + struct net_device *netdev = rxq->netdev;
> + struct sk_buff *skb;
> + u32 sge_num;
> +
> + skb = netdev_alloc_skb_ip_align(netdev, HINIC3_RX_HDR_SIZE);
> + if (unlikely(!skb))
> + return NULL;
> +
> + sge_num = hinic3_get_sge_num(rxq, pkt_len);
> +
> + prefetchw(skb->data);
net_prefetchw ?
> +int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget)
> +{
> + struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev);
> + u32 sw_ci, status, pkt_len, vlan_len;
> + struct hinic3_rq_cqe *rx_cqe;
> + u32 num_wqe = 0;
> + int nr_pkts = 0;
> + u16 num_lro;
> +
> + while (likely(nr_pkts < budget)) {
> + sw_ci = rxq->cons_idx & rxq->q_mask;
> + rx_cqe = rxq->cqe_arr + sw_ci;
> + status = rx_cqe->status;
> + if (!RQ_CQE_STATUS_GET(status, RXDONE))
> + break;
> +
> + /* make sure we read rx_done before packet length */
> + rmb();
> +
> + vlan_len = rx_cqe->vlan_len;
> + pkt_len = RQ_CQE_SGE_GET(vlan_len, LEN);
> + if (recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status))
> + break;
> +
> + nr_pkts++;
> + num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
> + if (num_lro)
> + num_wqe += hinic3_get_sge_num(rxq, pkt_len);
> +
> + rx_cqe->status = 0;
> +
> + if (num_wqe >= nic_dev->lro_replenish_thld)
> + break;
> + }
> +
> + if (rxq->delta >= HINIC3_RX_BUFFER_WRITE)
> + hinic3_rx_fill_buffers(rxq);
Doesn't this function need to re-enable hw IRQs? Maybe it does
somewhere in one of the helpers and I missed it?
Even so, it should probably be checking the return value of
napi_complete_done() before re-enabling IRQs, and I don't see a call
to that anywhere, but maybe I missed it?
I also don't see any calls to netif_napi_add, so I'm not sure if
this code needs to be included in this patch ?
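
For reference, the usual shape at the end of a ->poll callback is
something like the below -- hinic3_enable_irq() and irq_cfg are
made-up names, just to illustrate the ordering:

	if (work_done < budget && napi_complete_done(napi, work_done))
		/* only re-arm the hw interrupt once NAPI is really done */
		hinic3_enable_irq(nic_dev, irq_cfg->msix_entry_idx);

	return work_done;
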
> +#define HINIC3_BDS_PER_SQ_WQEBB \
> + (HINIC3_SQ_WQEBB_SIZE / sizeof(struct hinic3_sq_bufdesc))
> +
> +int hinic3_tx_poll(struct hinic3_txq *txq, int budget)
> +{
> + struct net_device *netdev = txq->netdev;
> + u16 hw_ci, sw_ci, q_id = txq->sq->q_id;
> + struct hinic3_nic_dev *nic_dev;
> + struct hinic3_tx_info *tx_info;
> + u16 wqebb_cnt = 0;
> + int pkts = 0;
> +
> + nic_dev = netdev_priv(netdev);
> + hw_ci = hinic3_get_sq_hw_ci(txq->sq);
> + dma_rmb();
> + sw_ci = hinic3_get_sq_local_ci(txq->sq);
> +
> + do {
> + tx_info = &txq->tx_info[sw_ci];
> +
> + /* Did all wqebb of this wqe complete? */
> + if (hw_ci == sw_ci ||
> + ((hw_ci - sw_ci) & txq->q_mask) < tx_info->wqebb_cnt)
> + break;
> +
> + sw_ci = (sw_ci + tx_info->wqebb_cnt) & (u16)txq->q_mask;
> + prefetch(&txq->tx_info[sw_ci]);
net_prefetch ?