Message-ID: <20241202121402.0d564c5c@collabora.com>
Date: Mon, 2 Dec 2024 12:14:02 +0100
From: Boris Brezillon <boris.brezillon@...labora.com>
To: Adrián Larumbe <adrian.larumbe@...labora.com>
Cc: Rob Herring <robh@...nel.org>, Steven Price <steven.price@....com>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>, Maxime Ripard
<mripard@...nel.org>, Thomas Zimmermann <tzimmermann@...e.de>, David Airlie
<airlied@...il.com>, Simona Vetter <simona@...ll.ch>, Philipp Zabel
<p.zabel@...gutronix.de>, kernel@...labora.com,
dri-devel@...ts.freedesktop.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 5/8] drm/panfrost: Handle page mapping failure
On Thu, 28 Nov 2024 21:06:20 +0000
Adrián Larumbe <adrian.larumbe@...labora.com> wrote:
> When mapping the pages of a BO (either a heap BO at page fault time, or
> a non-heap BO at object creation time), if the ARM page table mapping
> function fails, unmap whatever had been mapped so far and bail out.
>
> Signed-off-by: Adrián Larumbe <adrian.larumbe@...labora.com>
> ---
> drivers/gpu/drm/panfrost/panfrost_mmu.c | 44 ++++++++++++++++++++++---
> 1 file changed, 39 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index 03ac527b35e7..5e30888bea0e 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -290,13 +290,31 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
> pm_runtime_put_autosuspend(pfdev->base.dev);
> }
>
> +static void mmu_unmap_range(size_t len, u64 iova, struct io_pgtable_ops *ops)
Can we use the following prototype instead, so the helper derives the
io-pgtable ops from the mmu and takes its arguments in the same order
as mmu_map_sg()?

static void mmu_unmap_range(struct panfrost_mmu *mmu,
			    u64 iova, size_t len)
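With that, the whole helper would look something like this (untested):

static void mmu_unmap_range(struct panfrost_mmu *mmu,
			    u64 iova, size_t len)
{
	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
	size_t unmapped_len = 0;

	while (unmapped_len < len) {
		size_t unmapped, pgcount;
		size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);

		/* unmap_pages() returns the number of bytes torn down */
		unmapped = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
		WARN_ON(unmapped != pgsize * pgcount);

		iova += pgsize * pgcount;
		unmapped_len += pgsize * pgcount;
	}
}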
> +{
> + size_t pgsize, unmapped_len = 0;
> + size_t unmapped_page, pgcount;
> +
> + while (unmapped_len < len) {
> + pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);
> +
> + unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
> + WARN_ON(unmapped_page != pgsize * pgcount);
> +
> + iova += pgsize * pgcount;
> + unmapped_len += pgsize * pgcount;
> + }
> +}
> +
> static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
> u64 iova, int prot, struct sg_table *sgt)
> {
> unsigned int count;
> struct scatterlist *sgl;
> struct io_pgtable_ops *ops = mmu->pgtbl_ops;
> + size_t total_mapped = 0;
> u64 start_iova = iova;
> + int ret = 0;
>
> for_each_sgtable_dma_sg(sgt, sgl, count) {
> unsigned long paddr = sg_dma_address(sgl);
> @@ -310,10 +328,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
> size_t pgcount, mapped = 0;
> size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
>
> - ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
> + ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
> GFP_KERNEL, &mapped);
> + if (ret) {
> + mmu_unmap_range(total_mapped, start_iova, ops);
> + return ret;
In general, I'm not a huge fan of those error paths where things are
undone manually, because they tend to be overlooked when new steps are
added. I'd rather have a "goto error_unmap_pages;" here, with the
matching label defined at the end of the function.
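Untested sketch of what I mean, assuming the mmu_unmap_range()
prototype suggested above:

	for_each_sgtable_dma_sg(sgt, sgl, count) {
		...

		while (len) {
			size_t pgcount, mapped = 0;
			size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);

			ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount,
					     prot, GFP_KERNEL, &mapped);
			if (ret)
				goto error_unmap_pages;

			/* Don't get stuck if things have gone wrong */
			mapped = max(mapped, pgsize);
			total_mapped += mapped;
			iova += mapped;
			paddr += mapped;
			len -= mapped;
		}
	}

	...

	return 0;

error_unmap_pages:
	/* Tear down everything that was mapped before the failure */
	mmu_unmap_range(mmu, start_iova, total_mapped);
	return ret;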
> + }
> /* Don't get stuck if things have gone wrong */
> mapped = max(mapped, pgsize);
> + total_mapped += mapped;
> iova += mapped;
> paddr += mapped;
> len -= mapped;
> @@ -333,6 +356,7 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
> struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
> struct sg_table *sgt;
> int prot = IOMMU_READ | IOMMU_WRITE;
> + int ret;
>
> if (WARN_ON(mapping->active))
> return 0;
> @@ -344,8 +368,13 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
> if (WARN_ON(IS_ERR(sgt)))
> return PTR_ERR(sgt);
>
> - mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
> - prot, sgt);
> + ret = mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
> + prot, sgt);
> + if (ret) {
> + drm_gem_shmem_put_pages(shmem);
Same here: I'd rather have a "goto err_put_pages;" with the label
defined at the end of the function.
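Untested sketch (err_put_pages is just a label name I made up):

	ret = mmu_map_sg(pfdev, mapping->mmu,
			 mapping->mmnode.start << PAGE_SHIFT, prot, sgt);
	if (ret)
		goto err_put_pages;

	mapping->active = true;

	return 0;

err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ret;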
> + return ret;
> + }
> +
> mapping->active = true;
>
> return 0;
> @@ -532,8 +561,10 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
> if (ret)
> goto err_map;
>
> - mmu_map_sg(pfdev, bomapping->mmu, addr,
> - IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
> + ret = mmu_map_sg(pfdev, bomapping->mmu, addr,
> + IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
> + if (ret)
> + goto err_mmu_map_sg;
>
> bomapping->active = true;
> bo->heap_rss_size += SZ_2M;
> @@ -547,6 +578,9 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
>
> return 0;
>
> +err_mmu_map_sg:
> + dma_unmap_sgtable(pfdev->base.dev, sgt,
> + DMA_BIDIRECTIONAL, 0);
> err_map:
> sg_free_table(sgt);
> err_unlock: