Message-ID: <aLaR5saI469MsgAT@vaman>
Date: Tue, 2 Sep 2025 12:12:46 +0530
From: Vinod Koul <vkoul@...nel.org>
To: Rosen Penev <rosenp@...il.com>
Cc: dmaengine@...r.kernel.org, open list <linux-kernel@...r.kernel.org>
Subject: Re: [PATCHv3 3/3] dmaengine: mv_xor: use devm for request_irq
On 27-08-25, 15:00, Rosen Penev wrote:
> request_irq() is only called in _probe(), so using devm removes the need to manually call free_irq().
That can be intentional! With devm the IRQ is released only after ->remove()
returns, so we need to ensure the device is quiesced before teardown, roughly
as sketched below...
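
To illustrate the ordering concern, here is a minimal sketch (not the mv_xor
code; the foo_* names and the quiesce helper are made up):

/* sketch: why devm_request_irq() changes the teardown ordering */
#include <linux/dmaengine.h>
#include <linux/platform_device.h>

struct foo_chan {
	struct dma_device dmadev;
	int irq;
};

static void foo_quiesce(struct foo_chan *chan)
{
	/*
	 * hypothetical helper: mask the controller's interrupt sources and
	 * wait for in-flight descriptors to complete
	 */
}

static void foo_remove(struct platform_device *pdev)
{
	struct foo_chan *chan = platform_get_drvdata(pdev);

	/* 1. quiesce the hardware so no new interrupt can fire */
	foo_quiesce(chan);

	/* 2. tear down the dmaengine state */
	dma_async_device_unregister(&chan->dmadev);

	/*
	 * 3. with plain request_irq() we would call free_irq() here, while
	 *    the channel data is still valid; with devm_request_irq() the
	 *    IRQ is released only after this function returns, so steps
	 *    1-2 must leave nothing for a late interrupt to trip over.
	 */
}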
>
> Same with irq_dispose_mapping.
>
> Signed-off-by: Rosen Penev <rosenp@...il.com>
> ---
> drivers/dma/mv_xor.c | 21 +++++++--------------
> 1 file changed, 7 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
> index d15a1990534b..81799ac2f48b 100644
> --- a/drivers/dma/mv_xor.c
> +++ b/drivers/dma/mv_xor.c
> @@ -1025,8 +1025,6 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
> list_del(&chan->device_node);
> }
>
> - free_irq(mv_chan->irq, mv_chan);
> -
> return 0;
> }
>
> @@ -1112,8 +1110,9 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
> /* clear errors before enabling interrupts */
> mv_chan_clear_err_status(mv_chan);
>
> - ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
> - 0, dev_name(&pdev->dev), mv_chan);
> + ret = devm_request_irq(&pdev->dev, mv_chan->irq,
> + mv_xor_interrupt_handler, 0,
> + dev_name(&pdev->dev), mv_chan);
> if (ret)
> goto err_free_dma;
>
> @@ -1138,14 +1137,14 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
> ret = mv_chan_memcpy_self_test(mv_chan);
> dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
> if (ret)
> - goto err_free_irq;
> + goto err_free_dma;
> }
>
> if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
> ret = mv_chan_xor_self_test(mv_chan);
> dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
> if (ret)
> - goto err_free_irq;
> + goto err_free_dma;
> }
>
> dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
> @@ -1156,12 +1155,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
>
> ret = dma_async_device_register(dma_dev);
> if (ret)
> - goto err_free_irq;
> + goto err_free_dma;
>
> return mv_chan;
>
> -err_free_irq:
> - free_irq(mv_chan->irq, mv_chan);
> err_free_dma:
> dma_free_wc(&pdev->dev, MV_XOR_POOL_SIZE,
> mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
> @@ -1400,7 +1397,6 @@ static int mv_xor_probe(struct platform_device *pdev)
> cap_mask, irq);
> if (IS_ERR(chan)) {
> ret = PTR_ERR(chan);
> - irq_dispose_mapping(irq);
> goto err_channel_add;
> }
>
> @@ -1435,11 +1431,8 @@ static int mv_xor_probe(struct platform_device *pdev)
>
> err_channel_add:
> for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
> - if (xordev->channels[i]) {
> + if (xordev->channels[i])
> mv_xor_channel_remove(xordev->channels[i]);
> - if (pdev->dev.of_node)
> - irq_dispose_mapping(xordev->channels[i]->irq);
> - }
>
> return ret;
> }
> --
> 2.51.0
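
If you want to keep the devm conversion, one option (again just a sketch, not
a requirement) is to release the managed IRQ explicitly wherever the ordering
matters, since devm_free_irq() still allows that:

/* sketch: explicit release of a devm-managed IRQ, reusing foo_chan above */
#include <linux/interrupt.h>

static int foo_channel_remove(struct device *dev, struct foo_chan *chan)
{
	/* quiesce the channel first (see foo_quiesce() above) */
	foo_quiesce(chan);

	/*
	 * release the devm-managed IRQ now, before the channel state is
	 * freed, instead of waiting for the devm teardown after ->remove()
	 */
	devm_free_irq(dev, chan->irq, chan);

	return 0;
}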
--
~Vinod