[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250502094155.00003f74@huawei.com>
Date: Fri, 2 May 2025 09:41:55 +0100
From: Jonathan Cameron <Jonathan.Cameron@...wei.com>
To: Erick Karanja <karanja99erick@...il.com>
CC: <manivannan.sadhasivam@...aro.org>, <kw@...ux.com>, <kishon@...nel.org>,
<bhelgaas@...gle.com>, <linux-pci@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <julia.lawall@...ia.fr>
Subject: Re: [PATCH 1/2] PCI: endpoint: Replace manual mutex handling with
scoped_guard()
On Thu, 1 May 2025 18:56:11 +0300
Erick Karanja <karanja99erick@...il.com> wrote:
In the patch name always mention which driver it is.
> This refactor replaces manual mutex lock/unlock with scoped_guard()
> in places where early exits use goto. Using scoped_guard()
> avoids error-prone unlock paths and simplifies control flow.
In this case that only works because you've replicated the other
unwinding code in various error paths. That's more error prone,
so I'm not convinced this particular case is a good use of scoped cleanup.
>
> Signed-off-by: Erick Karanja <karanja99erick@...il.com>
> ---
> drivers/pci/endpoint/functions/pci-epf-mhi.c | 358 +++++++++----------
> 1 file changed, 166 insertions(+), 192 deletions(-)
>
> diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> index 6643a88c7a0c..57ef522c3d07 100644
> --- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
> +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
> @@ -323,57 +323,52 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl,
> if (buf_info->size < SZ_4K)
> return pci_epf_mhi_iatu_read(mhi_cntrl, buf_info);
>
> - mutex_lock(&epf_mhi->lock);
> -
> - config.direction = DMA_DEV_TO_MEM;
> - config.src_addr = buf_info->host_addr;
> + scoped_guard(mutex, &epf_mhi->lock) {
I'd be tempted to use
guard(mutex)(&epf_mhi->lock); given your scope goes to the end
of the function.
That will reduce the diff and make it easier to spot any functional changes...
> + config.direction = DMA_DEV_TO_MEM;
> + config.src_addr = buf_info->host_addr;
>
> - ret = dmaengine_slave_config(chan, &config);
> - if (ret) {
> - dev_err(dev, "Failed to configure DMA channel\n");
> - goto err_unlock;
> - }
> + ret = dmaengine_slave_config(chan, &config);
> + if (ret) {
> + dev_err(dev, "Failed to configure DMA channel\n");
> + return ret;
> + }
>
> - dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> - DMA_FROM_DEVICE);
> - ret = dma_mapping_error(dma_dev, dst_addr);
> - if (ret) {
> - dev_err(dev, "Failed to map remote memory\n");
> - goto err_unlock;
> - }
> + dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> + DMA_FROM_DEVICE);
> + ret = dma_mapping_error(dma_dev, dst_addr);
> + if (ret) {
> + dev_err(dev, "Failed to map remote memory\n");
> + return ret;
> + }
>
> - desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
> - DMA_DEV_TO_MEM,
> - DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> - if (!desc) {
> - dev_err(dev, "Failed to prepare DMA\n");
> - ret = -EIO;
> - goto err_unmap;
> - }
> + desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
> + DMA_DEV_TO_MEM,
> + DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> + if (!desc) {
> + dev_err(dev, "Failed to prepare DMA\n");
> + dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
In cases like this, where there is other cleanup to do, the advantages of scoped
cleanup are greatly reduced. I'm not sure it is a good idea here.
> + return -EIO;
> + }
>
> - desc->callback = pci_epf_mhi_dma_callback;
> - desc->callback_param = &complete;
> + desc->callback = pci_epf_mhi_dma_callback;
> + desc->callback_param = &complete;
>
> - cookie = dmaengine_submit(desc);
> - ret = dma_submit_error(cookie);
> - if (ret) {
> - dev_err(dev, "Failed to do DMA submit\n");
> - goto err_unmap;
> - }
> + cookie = dmaengine_submit(desc);
> + ret = dma_submit_error(cookie);
> + if (ret) {
> + dev_err(dev, "Failed to do DMA submit\n");
> + dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> + return ret;
> + }
>
> - dma_async_issue_pending(chan);
> - ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
> - if (!ret) {
> - dev_err(dev, "DMA transfer timeout\n");
> - dmaengine_terminate_sync(chan);
> - ret = -ETIMEDOUT;
This path previously hit the err_unmap condition and now it doesn't.
Why that functional change?
> + dma_async_issue_pending(chan);
> + ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
> + if (!ret) {
> + dev_err(dev, "DMA transfer timeout\n");
> + dmaengine_terminate_sync(chan);
> + ret = -ETIMEDOUT;
return -ETIMEDOUT;
> + }
> }
> -
Even in the good path we were previously unmapping and now we aren't.
> -err_unmap:
> - dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> -err_unlock:
> - mutex_unlock(&epf_mhi->lock);
> -
> return ret;
return 0; //assuming no way to get here with ret set.
> }
>
> @@ -394,57 +389,52 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
> if (buf_info->size < SZ_4K)
> return pci_epf_mhi_iatu_write(mhi_cntrl, buf_info);
>
> - mutex_lock(&epf_mhi->lock);
> -
> - config.direction = DMA_MEM_TO_DEV;
> - config.dst_addr = buf_info->host_addr;
> + scoped_guard(mutex, &epf_mhi->lock) {
> + config.direction = DMA_MEM_TO_DEV;
> + config.dst_addr = buf_info->host_addr;
>
> - ret = dmaengine_slave_config(chan, &config);
> - if (ret) {
> - dev_err(dev, "Failed to configure DMA channel\n");
> - goto err_unlock;
> - }
> + ret = dmaengine_slave_config(chan, &config);
> + if (ret) {
> + dev_err(dev, "Failed to configure DMA channel\n");
> + return ret;
> + }
>
> - src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> - DMA_TO_DEVICE);
> - ret = dma_mapping_error(dma_dev, src_addr);
> - if (ret) {
> - dev_err(dev, "Failed to map remote memory\n");
> - goto err_unlock;
> - }
> + src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> + DMA_TO_DEVICE);
> + ret = dma_mapping_error(dma_dev, src_addr);
> + if (ret) {
> + dev_err(dev, "Failed to map remote memory\n");
> + return ret;
> + }
>
> - desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
> - DMA_MEM_TO_DEV,
> - DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> - if (!desc) {
> - dev_err(dev, "Failed to prepare DMA\n");
> - ret = -EIO;
> - goto err_unmap;
> - }
> + desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
> + DMA_MEM_TO_DEV,
> + DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> + if (!desc) {
> + dev_err(dev, "Failed to prepare DMA\n");
> + dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> + return -EIO;
> + }
>
> - desc->callback = pci_epf_mhi_dma_callback;
> - desc->callback_param = &complete;
> + desc->callback = pci_epf_mhi_dma_callback;
> + desc->callback_param = &complete;
>
> - cookie = dmaengine_submit(desc);
> - ret = dma_submit_error(cookie);
> - if (ret) {
> - dev_err(dev, "Failed to do DMA submit\n");
> - goto err_unmap;
> - }
> + cookie = dmaengine_submit(desc);
> + ret = dma_submit_error(cookie);
> + if (ret) {
> + dev_err(dev, "Failed to do DMA submit\n");
> + dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> + return ret;
> + }
>
> - dma_async_issue_pending(chan);
> - ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
> - if (!ret) {
> - dev_err(dev, "DMA transfer timeout\n");
> - dmaengine_terminate_sync(chan);
> - ret = -ETIMEDOUT;
> + dma_async_issue_pending(chan);
> + ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
> + if (!ret) {
> + dev_err(dev, "DMA transfer timeout\n");
> + dmaengine_terminate_sync(chan);
> + ret = -ETIMEDOUT;
return -ETIMEDOUT;
Same issue with the logic change to not unmap any more.
> + }
> }
> -
> -err_unmap:
> - dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> -err_unlock:
> - mutex_unlock(&epf_mhi->lock);
> -
> return ret;
return 0; // assuming there is no way to get here with an error set
> }
>
> @@ -497,67 +487,59 @@ static int pci_epf_mhi_edma_read_async(struct mhi_ep_cntrl *mhi_cntrl,
> dma_addr_t dst_addr;
> int ret;
>
> - mutex_lock(&epf_mhi->lock);
> + scoped_guard(mutex, &epf_mhi->lock) {
> + config.direction = DMA_DEV_TO_MEM;
> + config.src_addr = buf_info->host_addr;
>
> - config.direction = DMA_DEV_TO_MEM;
> - config.src_addr = buf_info->host_addr;
> + ret = dmaengine_slave_config(chan, &config);
> + if (ret) {
> + dev_err(dev, "Failed to configure DMA channel\n");
> + return ret;
> + }
>
> - ret = dmaengine_slave_config(chan, &config);
> - if (ret) {
> - dev_err(dev, "Failed to configure DMA channel\n");
> - goto err_unlock;
> - }
> + dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> + DMA_FROM_DEVICE);
> + ret = dma_mapping_error(dma_dev, dst_addr);
> + if (ret) {
> + dev_err(dev, "Failed to map remote memory\n");
> + return ret;
> + }
>
> - dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> - DMA_FROM_DEVICE);
> - ret = dma_mapping_error(dma_dev, dst_addr);
> - if (ret) {
> - dev_err(dev, "Failed to map remote memory\n");
> - goto err_unlock;
> - }
> + desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
> + DMA_DEV_TO_MEM,
> + DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> + if (!desc) {
> + dev_err(dev, "Failed to prepare DMA\n");
> + dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> + return -EIO;
> + }
>
> - desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
> - DMA_DEV_TO_MEM,
> - DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> - if (!desc) {
> - dev_err(dev, "Failed to prepare DMA\n");
> - ret = -EIO;
> - goto err_unmap;
> - }
> + transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
> + if (!transfer) {
> + dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> + return -ENOMEM;
> + }
>
> - transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
> - if (!transfer) {
> - ret = -ENOMEM;
> - goto err_unmap;
> - }
> + transfer->epf_mhi = epf_mhi;
> + transfer->paddr = dst_addr;
> + transfer->size = buf_info->size;
> + transfer->dir = DMA_FROM_DEVICE;
> + memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
>
> - transfer->epf_mhi = epf_mhi;
> - transfer->paddr = dst_addr;
> - transfer->size = buf_info->size;
> - transfer->dir = DMA_FROM_DEVICE;
> - memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
> + desc->callback = pci_epf_mhi_dma_async_callback;
> + desc->callback_param = transfer;
>
> - desc->callback = pci_epf_mhi_dma_async_callback;
> - desc->callback_param = transfer;
> + cookie = dmaengine_submit(desc);
> + ret = dma_submit_error(cookie);
> + if (ret) {
> + dev_err(dev, "Failed to do DMA submit\n");
> + kfree(transfer);
> + dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> + return ret;
> + }
>
> - cookie = dmaengine_submit(desc);
> - ret = dma_submit_error(cookie);
> - if (ret) {
> - dev_err(dev, "Failed to do DMA submit\n");
> - goto err_free_transfer;
> + dma_async_issue_pending(chan);
> }
> -
> - dma_async_issue_pending(chan);
> -
> - goto err_unlock;
> -
> -err_free_transfer:
> - kfree(transfer);
> -err_unmap:
> - dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
> -err_unlock:
> - mutex_unlock(&epf_mhi->lock);
> -
> return ret;
> }
>
> @@ -576,67 +558,59 @@ static int pci_epf_mhi_edma_write_async(struct mhi_ep_cntrl *mhi_cntrl,
> dma_addr_t src_addr;
> int ret;
>
> - mutex_lock(&epf_mhi->lock);
> + scoped_guard(mutex, &epf_mhi->lock) {
> + config.direction = DMA_MEM_TO_DEV;
> + config.dst_addr = buf_info->host_addr;
>
> - config.direction = DMA_MEM_TO_DEV;
> - config.dst_addr = buf_info->host_addr;
> + ret = dmaengine_slave_config(chan, &config);
> + if (ret) {
> + dev_err(dev, "Failed to configure DMA channel\n");
> + return ret;
> + }
>
> - ret = dmaengine_slave_config(chan, &config);
> - if (ret) {
> - dev_err(dev, "Failed to configure DMA channel\n");
> - goto err_unlock;
> - }
> + src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> + DMA_TO_DEVICE);
> + ret = dma_mapping_error(dma_dev, src_addr);
> + if (ret) {
> + dev_err(dev, "Failed to map remote memory\n");
> + return ret;
> + }
>
> - src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
> - DMA_TO_DEVICE);
> - ret = dma_mapping_error(dma_dev, src_addr);
> - if (ret) {
> - dev_err(dev, "Failed to map remote memory\n");
> - goto err_unlock;
> - }
> + desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
> + DMA_MEM_TO_DEV,
> + DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> + if (!desc) {
> + dev_err(dev, "Failed to prepare DMA\n");
> + dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> + return -EIO;
> + }
>
> - desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
> - DMA_MEM_TO_DEV,
> - DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
> - if (!desc) {
> - dev_err(dev, "Failed to prepare DMA\n");
> - ret = -EIO;
> - goto err_unmap;
> - }
> + transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
> + if (!transfer) {
> + dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> + return -ENOMEM;
> + }
>
> - transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
> - if (!transfer) {
> - ret = -ENOMEM;
> - goto err_unmap;
> - }
> + transfer->epf_mhi = epf_mhi;
> + transfer->paddr = src_addr;
> + transfer->size = buf_info->size;
> + transfer->dir = DMA_TO_DEVICE;
> + memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
>
> - transfer->epf_mhi = epf_mhi;
> - transfer->paddr = src_addr;
> - transfer->size = buf_info->size;
> - transfer->dir = DMA_TO_DEVICE;
> - memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
> + desc->callback = pci_epf_mhi_dma_async_callback;
> + desc->callback_param = transfer;
>
> - desc->callback = pci_epf_mhi_dma_async_callback;
> - desc->callback_param = transfer;
> + cookie = dmaengine_submit(desc);
> + ret = dma_submit_error(cookie);
> + if (ret) {
> + dev_err(dev, "Failed to do DMA submit\n");
> + kfree(transfer);
> + dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> + return ret;
> + }
>
> - cookie = dmaengine_submit(desc);
> - ret = dma_submit_error(cookie);
> - if (ret) {
> - dev_err(dev, "Failed to do DMA submit\n");
> - goto err_free_transfer;
> + dma_async_issue_pending(chan);
> }
> -
> - dma_async_issue_pending(chan);
> -
> - goto err_unlock;
> -
> -err_free_transfer:
> - kfree(transfer);
> -err_unmap:
> - dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
> -err_unlock:
> - mutex_unlock(&epf_mhi->lock);
> -
> return ret;
> }
>
Powered by blists - more mailing lists