Message-ID: <20251127101116.012309e7@fedora>
Date: Thu, 27 Nov 2025 10:11:16 +0100
From: Boris Brezillon <boris.brezillon@...labora.com>
To: Adrián Larumbe <adrian.larumbe@...labora.com>
Cc: linux-kernel@...r.kernel.org, dri-devel@...ts.freedesktop.org, Steven
Price <steven.price@....com>, kernel@...labora.com, Liviu Dudau
<liviu.dudau@....com>, Maarten Lankhorst
<maarten.lankhorst@...ux.intel.com>, Maxime Ripard <mripard@...nel.org>,
Thomas Zimmermann <tzimmermann@...e.de>, David Airlie <airlied@...il.com>,
Simona Vetter <simona@...ll.ch>
Subject: Re: [PATCH v2 1/1] drm/panthor: Support partial unmaps of huge
pages
On Thu, 27 Nov 2025 03:50:13 +0000
Adrián Larumbe <adrian.larumbe@...labora.com> wrote:
> Commit 33729a5fc0ca ("iommu/io-pgtable-arm: Remove split on unmap
> behavior") did away with the treatment of partial unmaps of huge IOPTEs.
>
> In the case of Panthor, this means that a VM_BIND unmap operation on a
> memory region whose start address and size aren't 2MiB-aligned would,
> if the region intersects a huge page, cause the ARM IOMMU management
> code to fail and a warning to be raised.
>
> For now, and for lack of a better alternative, it's best to have
> Panthor handle partial unmaps at the driver level, by unmapping the
> entire huge pages involved and then remapping the parts of them that
> fall outside the requested unmap region.
>
> This could change in the future when the VM_BIND uAPI is expanded to
> enforce huge page alignment and map/unmap operational constraints that
> render this code unnecessary.
>
> Signed-off-by: Adrián Larumbe <adrian.larumbe@...labora.com>
> ---
> drivers/gpu/drm/panthor/panthor_mmu.c | 76 +++++++++++++++++++++++++++
> 1 file changed, 76 insertions(+)
>
> diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
> index 183da30fa500..41d7974c95ea 100644
> --- a/drivers/gpu/drm/panthor/panthor_mmu.c
> +++ b/drivers/gpu/drm/panthor/panthor_mmu.c
> @@ -2110,6 +2110,57 @@ static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
> return 0;
> }
>
> +static bool
> +is_huge_page(const struct panthor_vma *unmap_vma, u64 addr)
The function name doesn't really match the arguments it's being
passed. I'd rename this function iova_mapped_as_huge_page(). I'd also
rename unmap_vma into vma (the helper doesn't have to know that the
test is used for unmapping), and addr into iova.
> +{
> + const struct page *pg;
> + pgoff_t bo_offset;
> +
> + bo_offset = addr - unmap_vma->base.va.addr + unmap_vma->base.gem.offset;
> + pg = to_panthor_bo(unmap_vma->base.gem.obj)->base.pages[bo_offset >> PAGE_SHIFT];
> +
> + return (folio_order(page_folio(pg)) >= PMD_ORDER);
I don't think we should use PMD_ORDER for this test, because the GPU
MMU page size might differ from the CPU one, and what we care about
here is whether this page is huge from the GPU MMU perspective. IOW, we
should have:
return folio_size(page_folio(pg)) >= SZ_2M;
> +}
> +
> +struct remap_params {
> + u64 prev_remap_start, prev_remap_range;
> + u64 next_remap_start, next_remap_range;
> +};
> +
> +static struct remap_params
> +get_map_unmap_intervals(const struct drm_gpuva_op_remap *op,
> + const struct panthor_vma *unmap_vma,
> + u64 *unmap_start, u64 *unmap_range)
> +{
> + u64 aligned_unmap_start, aligned_unmap_end, unmap_end;
> + struct remap_params params = {0};
> +
> + drm_gpuva_op_remap_to_unmap_range(op, unmap_start, unmap_range);
> + unmap_end = *unmap_start + *unmap_range;
> +
> + aligned_unmap_start = ALIGN_DOWN(*unmap_start, SZ_2M);
> +
> + if (aligned_unmap_start < *unmap_start &&
> + unmap_vma->base.va.addr <= aligned_unmap_start &&
> + is_huge_page(unmap_vma, *unmap_start)) {
> + params.prev_remap_start = aligned_unmap_start;
> + params.prev_remap_range = *unmap_start & (SZ_2M - 1);
> + *unmap_range += *unmap_start - aligned_unmap_start;
> + *unmap_start = aligned_unmap_start;
> + }
> +
> + aligned_unmap_end = ALIGN(unmap_end, SZ_2M);
> +
> + if (aligned_unmap_end > unmap_end &&
> + (unmap_vma->base.va.addr + unmap_vma->base.va.range >= aligned_unmap_end) &&
> + is_huge_page(unmap_vma, unmap_end - 1)) {
> + *unmap_range += params.next_remap_range = aligned_unmap_end - unmap_end;
Let's do that in two steps to make it more readable please:
params.next_remap_range = aligned_unmap_end - unmap_end;
*unmap_range += params.next_remap_range;
> + params.next_remap_start = unmap_end;
> + }
> +
> + return params;
> +}
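FWIW, just to make sure I'm reading the interval math right: unmapping,
say, [2.5MiB, 5MiB) from a VMA covering [2MiB, 6MiB) that's entirely
backed by huge pages would become an unmap of [2MiB, 6MiB) followed by
remaps of [2MiB, 2.5MiB) (prev) and [5MiB, 6MiB) (next), correct?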
> +
> static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
> void *priv)
> {
> @@ -2118,19 +2169,44 @@ static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
> struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
> struct panthor_vma *prev_vma = NULL, *next_vma = NULL;
> u64 unmap_start, unmap_range;
> + struct remap_params params;
> int ret;
>
> drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
> +
> + /*
> + * ARM IOMMU page table management code disallows partial unmaps of huge pages,
> + * so when a partial unmap is requested, we must first unmap the entire huge
> + * page and then remap the parts of the huge page that fall outside the requested
> + * unmap region. Calculating the right offsets and ranges for the different unmap
> + * and map operations is the responsibility of the following function.
> + */
> + params = get_map_unmap_intervals(&op->remap, unmap_vma, &unmap_start, &unmap_range);
> +
> ret = panthor_vm_unmap_pages(vm, unmap_start, unmap_range);
> if (ret)
> return ret;
>
> if (op->remap.prev) {
> + ret = panthor_vm_map_pages(vm, params.prev_remap_start,
> + flags_to_prot(unmap_vma->flags),
> + to_drm_gem_shmem_obj(op->remap.prev->gem.obj)->sgt,
> + op->remap.prev->gem.offset, params.prev_remap_range);
> + if (ret)
> + return ret;
> +
> prev_vma = panthor_vm_op_ctx_get_vma(op_ctx);
> panthor_vma_init(prev_vma, unmap_vma->flags);
> }
>
> if (op->remap.next) {
> + ret = panthor_vm_map_pages(vm, params.next_remap_start,
> + flags_to_prot(unmap_vma->flags),
> + to_drm_gem_shmem_obj(op->remap.next->gem.obj)->sgt,
> + op->remap.next->gem.offset, params.next_remap_range);
> + if (ret)
> + return ret;
> +
> next_vma = panthor_vm_op_ctx_get_vma(op_ctx);
> panthor_vma_init(next_vma, unmap_vma->flags);
> }