Message-ID: <20260119224217.1362965-1-kuba@kernel.org>
Date: Mon, 19 Jan 2026 14:42:17 -0800
From: Jakub Kicinski <kuba@...nel.org>
To: xuanzhuo@...ux.alibaba.com
Cc: Jakub Kicinski <kuba@...nel.org>,
davem@...emloft.net,
netdev@...r.kernel.org,
andrew@...n.ch,
andrew+netdev@...n.ch,
dust.li@...ux.alibaba.com,
pabeni@...hat.com,
vadim.fedorenko@...ux.dev,
dong100@...se.com,
guwen@...ux.alibaba.com,
lorenzo@...nel.org,
edumazet@...gle.com,
lulie@...ux.alibaba.com,
lukas.bulwahn@...hat.com
Subject: Re: [net-next,v20,4/6] eea: create/destroy rx,tx queues for netdevice open and stop
This is an AI-generated review of your patch. The human sending this
email has considered the AI review valid, or at least plausible.
For local reproduction steps see https://netdev-ai.bots.linux.dev/ai-local.html
---
eea: create/destroy rx,tx queues for netdevice open and stop
This commit adds netdevice open and stop operations for the EEA driver,
including TX and RX queue allocation/deallocation and NAPI setup.
> diff --git a/drivers/net/ethernet/alibaba/eea/eea_tx.c b/drivers/net/ethernet/alibaba/eea/eea_tx.c
> new file mode 100644
> index 000000000000..f3e648960918
> --- /dev/null
> +++ b/drivers/net/ethernet/alibaba/eea/eea_tx.c
[ ... ]
> +static void eea_tx_meta_put_and_unmap(struct eea_net_tx *tx,
> +				       struct eea_tx_meta *meta)
> +{
> +	struct eea_tx_meta *head;
> +
> +	head = meta;
> +
> +	while (true) {
> +		dma_unmap_single(tx->dma_dev, meta->dma_addr,
> +				 meta->dma_len, DMA_TO_DEVICE);
> +
> +		meta->data = NULL;
> +
> +		if (meta->next) {
> +			meta = meta->next;
> +			continue;
> +		}
> +
> +		break;
> +	}
> +
> +	meta->next = tx->free;
> +	tx->free = head;
> +}
> +
> +static void eea_meta_free_xmit(struct eea_net_tx *tx,
> +			       struct eea_tx_meta *meta,
> +			       bool in_napi,
> +			       struct eea_tx_cdesc *desc,
> +			       struct eea_sq_free_stats *stats)
> +{
> +	struct sk_buff *skb = meta->skb;
> +
> +	if (!skb) {
> +		netdev_err(tx->enet->netdev,
> +			   "tx meta->skb is null. id %d num: %d\n",
> +			   meta->id, meta->num);
> +		return;
> +	}
> +
> +	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && desc)) {
> +		struct skb_shared_hwtstamps ts = {};
> +
> +		ts.hwtstamp = EEA_DESC_TS(desc) + tx->enet->hw_ts_offset;
> +		skb_tstamp_tx(skb, &ts);
> +	}
> +
> +	stats->bytes += meta->skb->len;
> +	napi_consume_skb(meta->skb, in_napi);
> +}
[ ... ]
> +static u32 eea_clean_tx(struct eea_net_tx *tx)
> +{
> +	struct eea_sq_free_stats stats = {0};
> +	struct eea_tx_cdesc *desc;
> +	struct eea_tx_meta *meta;
> +
> +	while ((desc = ering_cq_get_desc(tx->ering))) {
> +		++stats.packets;
> +
> +		meta = &tx->meta[le16_to_cpu(desc->id)];
> +
> +		eea_meta_free_xmit(tx, meta, true, desc, &stats);
> +
> +		ering_cq_ack_desc(tx->ering, meta->num);
> +		eea_tx_meta_put_and_unmap(tx, meta);
> +	}
> +
> +	return stats.packets;
> +}
[ ... ]
> +static int eea_tx_post_skb(struct eea_net_tx *tx, struct sk_buff *skb)
> +{
> +	const struct skb_shared_info *shinfo = skb_shinfo(skb);
> +	u32 hlen = skb_headlen(skb);
> +	struct eea_tx_meta *meta;
> +	dma_addr_t addr;
> +	int i, err;
> +	u16 flags;
> +
> +	addr = dma_map_single(tx->dma_dev, skb->data, hlen, DMA_TO_DEVICE);
> +	if (unlikely(dma_mapping_error(tx->dma_dev, addr)))
> +		return -ENOMEM;
[ ... ]
> +static void eea_free_meta(struct eea_net_tx *tx, struct eea_net_cfg *cfg)
> +{
> +	struct eea_sq_free_stats stats;
> +	struct eea_tx_meta *meta;
> +	int i;
> +
> +	while ((meta = eea_tx_meta_get(tx)))
> +		meta->skb = NULL;
> +
> +	for (i = 0; i < cfg->tx_ring_depth; i++) {
> +		meta = &tx->meta[i];
> +
> +		if (!meta->skb)
> +			continue;
> +
> +		eea_meta_free_xmit(tx, meta, false, NULL, &stats);
                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In eea_clean_tx(), eea_meta_free_xmit() is followed by a call to
eea_tx_meta_put_and_unmap(), which walks the meta chain and calls
dma_unmap_single() on each mapping.

Here in eea_free_meta(), only eea_meta_free_xmit() is called;
eea_tx_meta_put_and_unmap() is not. The DMA mappings created in
eea_tx_post_skb() via dma_map_single() and skb_frag_dma_map() are
therefore never unmapped when the device is closed with TX packets
still pending. Does this leak DMA mappings?
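
If so, one possible fix would be to mirror the eea_clean_tx()
completion path and unmap before recycling each slot. A minimal
sketch, assuming eea_tx_meta_put_and_unmap() is safe to call here
outside the NAPI completion path and that each pending slot's meta
chain still describes live mappings (neither verified against the
rest of the series):

	for (i = 0; i < cfg->tx_ring_depth; i++) {
		meta = &tx->meta[i];

		if (!meta->skb)
			continue;

		eea_meta_free_xmit(tx, meta, false, NULL, &stats);

		/* Unmap the head and frag mappings and return the
		 * chain to the free list, as eea_clean_tx() does.
		 */
		eea_tx_meta_put_and_unmap(tx, meta);

		meta->skb = NULL;
	}

Since eea_tx_meta_put_and_unmap() takes the meta pointer by value, the
caller's meta still references the head of the chain afterwards, so
clearing meta->skb keeps the rest of the loop unchanged.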
> +
> +		meta->skb = NULL;
> +	}
> +
> +	kvfree(tx->meta);
> +	tx->meta = NULL;
> +}
--
pw-bot: cr