Message-ID: <20250821161420.7c9804f7@kernel.org>
Date: Thu, 21 Aug 2025 16:14:20 -0700
From: Jakub Kicinski <kuba@...nel.org>
To: Vivian Wang <wangruikang@...as.ac.cn>
Cc: Andrew Lunn <andrew+netdev@...n.ch>, Rob Herring <robh@...nel.org>,
Krzysztof Kozlowski <krzk+dt@...nel.org>, Conor Dooley
<conor+dt@...nel.org>, Yixun Lan <dlan@...too.org>, "David S. Miller"
<davem@...emloft.net>, Eric Dumazet <edumazet@...gle.com>, Paolo Abeni
<pabeni@...hat.com>, Philipp Zabel <p.zabel@...gutronix.de>, Paul Walmsley
<paul.walmsley@...ive.com>, Palmer Dabbelt <palmer@...belt.com>, Albert Ou
<aou@...s.berkeley.edu>, Alexandre Ghiti <alex@...ti.fr>, Vivian Wang
<uwu@...m.page>, Vadim Fedorenko <vadim.fedorenko@...ux.dev>, Junhui Liu
<junhui.liu@...moral.tech>, Simon Horman <horms@...nel.org>, Maxime
Chevallier <maxime.chevallier@...tlin.com>, netdev@...r.kernel.org,
devicetree@...r.kernel.org, linux-riscv@...ts.infradead.org,
spacemit@...ts.linux.dev, linux-kernel@...r.kernel.org
Subject: Re: [PATCH net-next v6 2/5] net: spacemit: Add K1 Ethernet MAC
On Wed, 20 Aug 2025 14:47:51 +0800 Vivian Wang wrote:
> +static void emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb)
> +{
> + struct emac_desc_ring *tx_ring = &priv->tx_ring;
> + struct emac_desc tx_desc, *tx_desc_addr;
> + struct device *dev = &priv->pdev->dev;
> + struct emac_tx_desc_buffer *tx_buf;
> + u32 head, old_head, frag_num, f;
> + bool buf_idx;
> +
> + frag_num = skb_shinfo(skb)->nr_frags;
> + head = tx_ring->head;
> + old_head = head;
> +
> + for (f = 0; f < frag_num + 1; f++) {
> + buf_idx = f % 2;
> +
> + /*
> + * If using buffer 1, initialize a new desc. Otherwise, use
> + * buffer 2 of previous fragment's desc.
> + */
> + if (!buf_idx) {
> + tx_buf = &tx_ring->tx_desc_buf[head];
> + tx_desc_addr =
> + &((struct emac_desc *)tx_ring->desc_addr)[head];
> + memset(&tx_desc, 0, sizeof(tx_desc));
> +
> + /*
> + * Give ownership for all but first desc initially. For
> + * first desc, give at the end so DMA cannot start
> + * reading uninitialized descs.
> + */
> + if (head != old_head)
> + tx_desc.desc0 |= TX_DESC_0_OWN;
> +
> + if (++head == tx_ring->total_cnt) {
> + /* Just used last desc in ring */
> + tx_desc.desc1 |= TX_DESC_1_END_RING;
> + head = 0;
> + }
> + }
> +
> + if (emac_tx_map_frag(dev, &tx_desc, tx_buf, skb, f)) {
> + netdev_err(priv->ndev, "Map TX frag %d failed", f);
> + goto dma_map_err;
> + }
> +
> + if (f == 0)
> + tx_desc.desc1 |= TX_DESC_1_FIRST_SEGMENT;
> +
> + if (f == frag_num) {
> + tx_desc.desc1 |= TX_DESC_1_LAST_SEGMENT;
> + tx_buf->skb = skb;
> + if (emac_tx_should_interrupt(priv, frag_num + 1))
> + tx_desc.desc1 |=
> + TX_DESC_1_INTERRUPT_ON_COMPLETION;
> + }
> +
> + *tx_desc_addr = tx_desc;
> + }
> +
> + /* All descriptors are ready, give ownership for first desc */
> + tx_desc_addr = &((struct emac_desc *)tx_ring->desc_addr)[old_head];
> + dma_wmb();
> + WRITE_ONCE(tx_desc_addr->desc0, tx_desc_addr->desc0 | TX_DESC_0_OWN);
> +
> + emac_dma_start_transmit(priv);
> +
> + tx_ring->head = head;
> +
> + return;
> +
> +dma_map_err:
> + dev_kfree_skb_any(skb);
You free the skb here..
> + priv->ndev->stats.tx_dropped++;
> +}
> +
> +static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
> +{
> + struct emac_priv *priv = netdev_priv(ndev);
> + int nfrags = skb_shinfo(skb)->nr_frags;
> + struct device *dev = &priv->pdev->dev;
> +
> + if (unlikely(emac_tx_avail(priv) < nfrags + 1)) {
> + if (!netif_queue_stopped(ndev)) {
> + netif_stop_queue(ndev);
> + dev_err_ratelimited(dev, "TX ring full, stop TX queue\n");
> + }
> + return NETDEV_TX_BUSY;
> + }
> +
> + emac_tx_mem_map(priv, skb);
> +
> + ndev->stats.tx_packets++;
> + ndev->stats.tx_bytes += skb->len;
.. and then you use skb here.
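The easiest fix is probably to record the length before the skb can go
away, and/or make emac_tx_mem_map() return an error so that only
successfully queued packets are counted. Rough idea, untested (assumes
emac_tx_mem_map() is changed to return 0 / -errno and still frees the
skb itself on the error path):

	unsigned int len = skb->len;

	/* read skb->len before the skb can be freed, either by the
	 * error path or by a racing TX completion
	 */
	if (emac_tx_mem_map(priv, skb))
		return NETDEV_TX_OK;

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;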
> + /* Make sure there is space in the ring for the next TX. */
> + if (unlikely(emac_tx_avail(priv) <= MAX_SKB_FRAGS + 2))
> + netif_stop_queue(ndev);
> +
> + return NETDEV_TX_OK;
> +}
> +static void emac_get_ethtool_stats(struct net_device *dev,
> + struct ethtool_stats *stats, u64 *data)
> +{
> + struct emac_priv *priv = netdev_priv(dev);
> + u64 *rx_stats = (u64 *)&priv->rx_stats;
> + int i;
> +
> + scoped_guard(spinlock_irqsave, &priv->stats_lock) {
Why is this spin lock taken in irqsave mode?

Please also convert the code not to use scoped_guard(). There isn't a
single flow control statement (return) in any of these sections, so the
guard only hides the fact that you're unnecessarily masking IRQs. See:

https://www.kernel.org/doc/html/next/process/maintainer-netdev.html#using-device-managed-and-cleanup-h-constructs
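IOW something along these lines (untested; use the _bh flavor if the
stats are also updated from NAPI / the datapath):

	spin_lock(&priv->stats_lock);

	emac_stats_update(priv);

	for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++)
		data[i] = rx_stats[emac_ethtool_rx_stats[i].offset];

	spin_unlock(&priv->stats_lock);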
> + emac_stats_update(priv);
> +
> + for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++)
> + data[i] = rx_stats[emac_ethtool_rx_stats[i].offset];
> + }
> +static void emac_tx_timeout_task(struct work_struct *work)
> +{
> + struct net_device *ndev;
> + struct emac_priv *priv;
> +
> + priv = container_of(work, struct emac_priv, tx_timeout_task);
> + ndev = priv->ndev;
I don't see this work ever being canceled.
What prevents ndev from being freed before it gets to run?
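At a minimum you need something along these lines in the stop / remove
path (just a sketch; exact placement depends on where the work can be
scheduled from):

	/* make sure a pending tx_timeout_task cannot run or get
	 * rescheduled after the interface is torn down and ndev is freed
	 */
	cancel_work_sync(&priv->tx_timeout_task);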
> +/* Called when net interface is brought up. */
> +static int emac_open(struct net_device *ndev)
> +{
> + struct emac_priv *priv = netdev_priv(ndev);
> + struct device *dev = &priv->pdev->dev;
> + int ret;
> +
> + ret = emac_alloc_tx_resources(priv);
> + if (ret) {
> + dev_err(dev, "Error when setting up the Tx resources\n");
> + goto emac_alloc_tx_resource_fail;
> + }
> +
> + ret = emac_alloc_rx_resources(priv);
> + if (ret) {
> + dev_err(dev, "Error when setting up the Rx resources\n");
> + goto emac_alloc_rx_resource_fail;
> + }
> +
> + ret = emac_up(priv);
> + if (ret) {
> + dev_err(dev, "Error when bringing interface up\n");
> + goto emac_up_fail;
> + }
> + return 0;
> +
> +emac_up_fail:
Please name the jump labels after the destination, not the source,
and fix this everywhere in the driver. This is covered in the kernel
coding style docs.
> + emac_free_rx_resources(priv);
> +emac_alloc_rx_resource_fail:
> + emac_free_tx_resources(priv);
> +emac_alloc_tx_resource_fail:
> + return ret;
> +}
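IOW something like this (label names up to you; the very first failure
can just return ret directly):

err_free_rx:
	emac_free_rx_resources(priv);
err_free_tx:
	emac_free_tx_resources(priv);
	return ret;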