Message-ID: <20230917084728.GI1125562@kernel.org>
Date: Sun, 17 Sep 2023 10:47:28 +0200
From: Simon Horman <horms@...nel.org>
To: Lorenzo Bianconi <lorenzo@...nel.org>
Cc: netdev@...r.kernel.org, lorenzo.bianconi@...hat.com, nbd@....name,
john@...ozen.org, sean.wang@...iatek.com, Mark-MC.Lee@...iatek.com,
davem@...emloft.net, edumazet@...gle.com, kuba@...nel.org,
pabeni@...hat.com, daniel@...rotopia.org,
linux-mediatek@...ts.infradead.org, sujuan.chen@...iatek.com,
robh+dt@...nel.org, krzysztof.kozlowski+dt@...aro.org,
devicetree@...r.kernel.org
Subject: Re: [PATCH net-next 13/15] net: ethernet: mtk_wed: introduce hw_rro
support for MT7988
On Thu, Sep 14, 2023 at 04:38:18PM +0200, Lorenzo Bianconi wrote:
> From: Sujuan Chen <sujuan.chen@...iatek.com>
>
> MT7988 SoC supports 802.11 receive reordering offload in hw, while
> MT7986 SoC implements it through the firmware running on the MCU.
>
> Co-developed-by: Lorenzo Bianconi <lorenzo@...nel.org>
> Signed-off-by: Lorenzo Bianconi <lorenzo@...nel.org>
> Signed-off-by: Sujuan Chen <sujuan.chen@...iatek.com>
...
Hi Lorenzo,
some minor feedback from my side.
> @@ -565,6 +565,73 @@ mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
> kfree(page_list);
> }
>
> +static int
> +mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev)
> +{
> + int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE;
> + struct mtk_wed_buf *page_list;
> + struct mtk_wed_bm_desc *desc;
> + dma_addr_t desc_phys;
> + int i, page_idx = 0;
> +
> + if (!dev->wlan.hw_rro)
> + return 0;
> +
> + page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
> + if (!page_list)
> + return -ENOMEM;
> +
> + dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
> + dev->hw_rro.pages = page_list;
> + desc = dma_alloc_coherent(dev->hw->dev,
> + dev->wlan.rx_nbuf * sizeof(*desc),
> + &desc_phys, GFP_KERNEL);
> + if (!desc)
> + return -ENOMEM;
> +
> + dev->hw_rro.desc = desc;
> + dev->hw_rro.desc_phys = desc_phys;
> +
> + for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
> + dma_addr_t page_phys, buf_phys;
> + struct page *page;
> + void *buf;
> + int s;
> +
> + page = __dev_alloc_page(GFP_KERNEL);
> + if (!page)
> + return -ENOMEM;
> +
> + page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
> + DMA_BIDIRECTIONAL);
> + if (dma_mapping_error(dev->hw->dev, page_phys)) {
> + __free_page(page);
> + return -ENOMEM;
> + }
> +
> + page_list[page_idx].p = page;
> + page_list[page_idx++].phy_addr = page_phys;
> + dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
> + DMA_BIDIRECTIONAL);
> +
> + buf = page_to_virt(page);
> + buf_phys = page_phys;
> +
> + for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) {
> + desc->buf0 = cpu_to_le32(buf_phys);
> + desc++;
> +
> + buf += MTK_WED_PAGE_BUF_SIZE;
clang-16 W=1 warns that buf is set but otherwise unused in this function.
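If buf really isn't needed then perhaps it can simply be dropped, as
buf_phys already does the per-slot address arithmetic. Something like
this (completely untested):

		dma_addr_t page_phys, buf_phys;
		struct page *page;
-		void *buf;
		int s;

	...

-		buf = page_to_virt(page);
		buf_phys = page_phys;

		for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) {
			desc->buf0 = cpu_to_le32(buf_phys);
			desc++;
-
-			buf += MTK_WED_PAGE_BUF_SIZE;
			buf_phys += MTK_WED_PAGE_BUF_SIZE;
		}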
> + buf_phys += MTK_WED_PAGE_BUF_SIZE;
> + }
> +
> + dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
> + DMA_BIDIRECTIONAL);
> + }
> +
> + return 0;
> +}
> +
> static int
> mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
> {
...