Message-ID: <3737a9bf-e95b-4ea8-b16b-418b777f407c@ti.com>
Date: Fri, 14 Nov 2025 16:43:21 +0530
From: Meghana Malladi <m-malladi@...com>
To: Simon Horman <horms@...nel.org>
CC: <namcao@...utronix.de>, <vadim.fedorenko@...ux.dev>,
<jacob.e.keller@...el.com>, <christian.koenig@....com>,
<sumit.semwal@...aro.org>, <sdf@...ichev.me>, <john.fastabend@...il.com>,
<hawk@...nel.org>, <daniel@...earbox.net>, <ast@...nel.org>,
<pabeni@...hat.com>, <kuba@...nel.org>, <edumazet@...gle.com>,
<davem@...emloft.net>, <andrew+netdev@...n.ch>,
<linaro-mm-sig@...ts.linaro.org>, <dri-devel@...ts.freedesktop.org>,
<linux-media@...r.kernel.org>, <bpf@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <netdev@...r.kernel.org>,
<linux-arm-kernel@...ts.infradead.org>, <srk@...com>, Vignesh Raghavendra
<vigneshr@...com>, Roger Quadros <rogerq@...nel.org>, <danishanwar@...com>
Subject: Re: [PATCH net-next v5 1/6] net: ti: icssg-prueth: Add functions to
create and destroy Rx/Tx queues
Hi Simon,
On 11/14/25 16:06, Simon Horman wrote:
> On Tue, Nov 11, 2025 at 03:45:18PM +0530, Meghana Malladi wrote:
>
> ...
>
>> diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
>> index 57a7d1ceab08..b66ffbfb499c 100644
>> --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
>> +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
>> @@ -735,6 +735,114 @@ static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
>> return 0;
>> }
>>
>> +static void prueth_destroy_txq(struct prueth_emac *emac)
>> +{
>> + int ret, i;
>> +
>> + atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
>> + /* ensure new tdown_cnt value is visible */
>> + smp_mb__after_atomic();
>> + /* tear down and disable UDMA channels */
>> + reinit_completion(&emac->tdown_complete);
>> + for (i = 0; i < emac->tx_ch_num; i++)
>> + k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
>> +
>> + ret = wait_for_completion_timeout(&emac->tdown_complete,
>> + msecs_to_jiffies(1000));
>> + if (!ret)
>> + netdev_err(emac->ndev, "tx teardown timeout\n");
>> +
>> + for (i = 0; i < emac->tx_ch_num; i++) {
>> + napi_disable(&emac->tx_chns[i].napi_tx);
>> + hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
>> + k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
>> + &emac->tx_chns[i],
>> + prueth_tx_cleanup);
>> + k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
>> + }
>> +}
>> +
>> +static void prueth_destroy_rxq(struct prueth_emac *emac)
>> +{
>> + int i, ret;
>> +
>> + /* tear down and disable UDMA channels */
>> + reinit_completion(&emac->tdown_complete);
>> + k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
>> +
>> + /* When RX DMA Channel Teardown is initiated, it will result in an
>> + * interrupt and a Teardown Completion Marker (TDCM) is queued into
>> + * the RX Completion queue. Acknowledging the interrupt involves
>> + * popping the TDCM descriptor from the RX Completion queue via the
>> + * RX NAPI Handler. To avoid timing out when waiting for the TDCM to
>> + * be popped, schedule the RX NAPI handler to run immediately.
>> + */
>> + if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) {
>> + if (napi_schedule_prep(&emac->napi_rx))
>> + __napi_schedule(&emac->napi_rx);
>> + }
>> +
>> + ret = wait_for_completion_timeout(&emac->tdown_complete,
>> + msecs_to_jiffies(1000));
>> + if (!ret)
>> + netdev_err(emac->ndev, "rx teardown timeout\n");
>> +
>> + for (i = 0; i < PRUETH_MAX_RX_FLOWS; i++) {
>> + napi_disable(&emac->napi_rx);
>> + hrtimer_cancel(&emac->rx_hrtimer);
>
> Hi Meghana,
>
> Is it intentional that the napi_disable() and hrtimer_cancel()
> are made once for each (possible) flow, rather than just once
> as was the case before this patch?
>
> Maybe the tx code, which does the same, was used as a template here
> in error?
>
Currently there is only one flow per Rx channel, but we can enable
support for adding multiple flows to a given channel. In that case
napi_disable() and hrtimer_cancel() will be invoked per flow. That
said, although the for loop is redundant right now, it is an
intentional change in preparation for multiple flows.
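
For reference, a rough sketch of how that loop could look once each
flow carries its own NAPI context and pacing timer. The rx_flows[]
array and its napi_rx/rx_hrtimer members are hypothetical here (not in
the current driver), the channel reset call is the same one used in
this patch:

	for (i = 0; i < PRUETH_MAX_RX_FLOWS; i++) {
		/* hypothetical per-flow NAPI instance and pacing timer */
		napi_disable(&emac->rx_flows[i].napi_rx);
		hrtimer_cancel(&emac->rx_flows[i].rx_hrtimer);
		k3_udma_glue_reset_rx_chn(emac->rx_chns.rx_chn, i,
					  &emac->rx_chns,
					  prueth_rx_cleanup);
	}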
> Flagged by Claude Code with https://github.com/masoncl/review-prompts/
>
>> + k3_udma_glue_reset_rx_chn(emac->rx_chns.rx_chn, i,
>> + &emac->rx_chns,
>> + prueth_rx_cleanup);
>> + }
>> +
>> + prueth_destroy_xdp_rxqs(emac);
>> + k3_udma_glue_disable_rx_chn(emac->rx_chns.rx_chn);
>> +}
>
> ...
>
>> @@ -905,32 +988,8 @@ static int emac_ndo_stop(struct net_device *ndev)
>> else
>> __dev_mc_unsync(ndev, icssg_prueth_del_mcast);
>>
>> - atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
>> - /* ensure new tdown_cnt value is visible */
>> - smp_mb__after_atomic();
>> - /* tear down and disable UDMA channels */
>> - reinit_completion(&emac->tdown_complete);
>> - for (i = 0; i < emac->tx_ch_num; i++)
>> - k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
>> -
>> - ret = wait_for_completion_timeout(&emac->tdown_complete,
>> - msecs_to_jiffies(1000));
>> - if (!ret)
>> - netdev_err(ndev, "tx teardown timeout\n");
>> -
>> - prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
>> - for (i = 0; i < emac->tx_ch_num; i++) {
>> - napi_disable(&emac->tx_chns[i].napi_tx);
>> - hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
>> - }
>> -
>> - max_rx_flows = PRUETH_MAX_RX_FLOWS;
>> - k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
>> -
>> - prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
>> - prueth_destroy_xdp_rxqs(emac);
>> - napi_disable(&emac->napi_rx);
>> - hrtimer_cancel(&emac->rx_hrtimer);
>> + prueth_destroy_txq(emac);
>> + prueth_destroy_rxq(emac);
>>
>> cancel_work_sync(&emac->rx_mode_work);
>>
>> @@ -943,10 +1002,10 @@ static int emac_ndo_stop(struct net_device *ndev)
>>
>> free_irq(emac->tx_ts_irq, emac);
>>
>> - free_irq(emac->rx_chns.irq[rx_flow], emac);
>> + free_irq(emac->rx_chns.irq[PRUETH_RX_FLOW_DATA], emac);
>> prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
>>
>> - prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
>> + prueth_cleanup_rx_chns(emac, &emac->rx_chns, PRUETH_MAX_RX_FLOWS);
>> prueth_cleanup_tx_chns(emac);
>>
>> prueth->emacs_initialized--;
>
> ...