Message-ID: <20251021164203.2b9fec35@fedora>
Date: Tue, 21 Oct 2025 16:42:03 +0200
From: Boris Brezillon <boris.brezillon@...labora.com>
To: Adrián Larumbe <adrian.larumbe@...labora.com>
Cc: linux-kernel@...r.kernel.org, dri-devel@...ts.freedesktop.org, Steven
Price <steven.price@....com>, kernel@...labora.com, Liviu Dudau
<liviu.dudau@....com>, Maarten Lankhorst
<maarten.lankhorst@...ux.intel.com>, Maxime Ripard <mripard@...nel.org>,
Thomas Zimmermann <tzimmermann@...e.de>, David Airlie <airlied@...il.com>,
Simona Vetter <simona@...ll.ch>
Subject: Re: [PATCH] drm/panthor: Support partial unmaps of huge pages
Hi Adrian,
On Sun, 19 Oct 2025 04:19:42 +0100
Adrián Larumbe <adrian.larumbe@...labora.com> wrote:
> Commit 33729a5fc0ca ("iommu/io-pgtable-arm: Remove split on unmap
> behavior") did away with the treatment of partial unmaps of huge IOPTEs.
>
> In the case of Panthor, that means a VM_BIND unmap operation on a
> memory region whose start address and size aren't 2MiB-aligned would,
> if it intersects a huge page, cause the ARM IOMMU management code to
> fail and raise a warning.
>
> Presently, and for lack of a better alternative, it's best to have
> Panthor handle partial unmaps at the driver level, by unmapping entire
> huge pages and remapping the difference between them and the requested
> unmap region.
>
> This could change in the future when the VM_BIND uAPI is expanded to
> enforce huge page alignment and map/unmap operational constraints that
> render this code unnecessary.
>
> Signed-off-by: Adrián Larumbe <adrian.larumbe@...labora.com>
> ---
> drivers/gpu/drm/panthor/panthor_mmu.c | 129 +++++++++++++++++++++++++-
> 1 file changed, 126 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
> index 2d041a2e75e9..f9d200e57c04 100644
> --- a/drivers/gpu/drm/panthor/panthor_mmu.c
> +++ b/drivers/gpu/drm/panthor/panthor_mmu.c
> @@ -2093,6 +2093,98 @@ static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
> return 0;
> }
>
> +static bool
> +is_huge_page_partial_unmap(const struct panthor_vma *unmap_vma,
> + const struct drm_gpuva_op_map *op,
> + u64 unmap_start, u64 unmap_range,
> + u64 sz2m_prev, u64 sz2m_next)
> +{
> + size_t pgcount, pgsize;
> + const struct page *pg;
> + pgoff_t bo_offset;
> +
> + if (op->va.addr < unmap_vma->base.va.addr) {
> + bo_offset = unmap_start - unmap_vma->base.va.addr + unmap_vma->base.gem.offset;
> + sz2m_prev = ALIGN_DOWN(unmap_start, SZ_2M);
> + sz2m_next = ALIGN(unmap_start + 1, SZ_2M);
> + pgsize = get_pgsize(unmap_start, unmap_range, &pgcount);
> +
> + } else {
> + bo_offset = ((unmap_start + unmap_range - 1) - unmap_vma->base.va.addr)
> + + unmap_vma->base.gem.offset;
> + sz2m_prev = ALIGN_DOWN(unmap_start + unmap_range - 1, SZ_2M);
> + sz2m_next = ALIGN(unmap_start + unmap_range, SZ_2M);
> + pgsize = get_pgsize(sz2m_prev, unmap_start + unmap_range - sz2m_prev, &pgcount);
> + }
> +
> + pg = to_panthor_bo(unmap_vma->base.gem.obj)->base.pages[bo_offset >> PAGE_SHIFT];
> +
> + if (pgsize == SZ_4K && folio_order(page_folio(pg)) == PMD_ORDER &&
> + unmap_vma->base.va.addr <= sz2m_prev && unmap_vma->base.va.addr +
> + unmap_vma->base.va.range >= sz2m_next)
> + return true;
> +
> + return false;
> +}
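If I read the condition right, it boils down to: the selected granule is
4KiB, the backing folio is PMD-sized, and the VMA fully covers the 2MiB
page being split. A standalone restatement with plain types (my
paraphrase, not driver code):

#include <stdbool.h>
#include <stdint.h>

#define SZ_4K (4ULL << 10)
#define SZ_2M (2ULL << 20)

/* Paraphrase of the check above: the io-pgtable code would have to
 * split iff the granule picked for the range is 4KiB, the backing
 * folio is 2MiB, and the VMA covers all of [sz2m_prev, sz2m_next).
 */
static bool partial_huge_unmap(uint64_t va_addr, uint64_t va_range,
			       uint64_t sz2m_prev, uint64_t sz2m_next,
			       uint64_t pgsize, uint64_t folio_size)
{
	return pgsize == SZ_4K && folio_size == SZ_2M &&
	       va_addr <= sz2m_prev && va_addr + va_range >= sz2m_next;
}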
> +
> +struct remap_params {
> + u64 prev_unmap_start, prev_unmap_range;
> + u64 prev_remap_start, prev_remap_range;
> + u64 next_unmap_start, next_unmap_range;
> + u64 next_remap_start, next_remap_range;
> + u64 unmap_start, unmap_range;
> +};
> +
> +static struct remap_params
> +get_map_unmap_intervals(const struct drm_gpuva_op_remap *op,
> + const struct panthor_vma *unmap_vma)
> +{
> + u64 unmap_start, unmap_range, sz2m_prev, sz2m_next;
> + struct remap_params params = {0};
> +
> + drm_gpuva_op_remap_to_unmap_range(op, &unmap_start, &unmap_range);
> +
> + if (op->prev) {
> + sz2m_prev = ALIGN_DOWN(unmap_start, SZ_2M);
> + sz2m_next = ALIGN(unmap_start + 1, SZ_2M);
> +
> + if (is_huge_page_partial_unmap(unmap_vma, op->prev, unmap_start,
> + unmap_range, sz2m_prev, sz2m_next)) {
> + params.prev_unmap_start = sz2m_prev;
> + params.prev_unmap_range = SZ_2M;
> + params.prev_remap_start = sz2m_prev;
> + params.prev_remap_range = unmap_start & (SZ_2M - 1);
> +
> + u64 diff = min(sz2m_next - unmap_start, unmap_range);
> +
> + unmap_range -= diff;
> + unmap_start += diff;
> + }
> + }
> +
> + if (op->next) {
> + sz2m_prev = ALIGN_DOWN(unmap_start + unmap_range - 1, SZ_2M);
> + sz2m_next = ALIGN(unmap_start + unmap_range, SZ_2M);
> +
> + if (is_huge_page_partial_unmap(unmap_vma, op->next, unmap_start,
> + unmap_range, sz2m_prev, sz2m_next)) {
> + if (unmap_range) {
> + params.next_unmap_start = sz2m_prev;
> + params.next_unmap_range = SZ_2M;
> + unmap_range -= op->next->va.addr & (SZ_2M - 1);
> + }
> +
> + params.next_remap_start = op->next->va.addr;
> + params.next_remap_range = SZ_2M - (op->next->va.addr & (SZ_2M - 1));
> + }
> + }
> +
> + params.unmap_start = unmap_start;
> + params.unmap_range = unmap_range;
> +
> + return params;
> +}
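To double-check the arithmetic, a hypothetical walk-through of the
values this produces (my numbers, assuming both
is_huge_page_partial_unmap() checks pass):

/*
 * input: unmap_start = 0x40110000, unmap_range = 0x4e0000
 *        => requested unmap is [0x40110000, 0x405f0000)
 *
 * prev side (sz2m_prev = 0x40000000, sz2m_next = 0x40200000):
 *   prev_unmap = [0x40000000, +2M)        whole huge page
 *   prev_remap = [0x40000000, +0x110000)  leftover before the unmap
 *   middle unmap advances to start at 0x40200000
 *
 * next side (sz2m_prev = 0x40400000, sz2m_next = 0x40600000):
 *   next_unmap = [0x40400000, +2M)        whole huge page
 *   next_remap = [0x405f0000, +0x10000)   leftover after the unmap
 *   middle unmap shrinks to [0x40200000, 0x40400000)
 *
 * Net effect: exactly [0x40110000, 0x405f0000) is unmapped.
 */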
> +
> static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
> void *priv)
> {
> @@ -2100,20 +2192,51 @@ static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
> struct panthor_vm *vm = priv;
> struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
> struct panthor_vma *prev_vma = NULL, *next_vma = NULL;
> - u64 unmap_start, unmap_range;
> + struct remap_params params;
> int ret;
>
> - drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
> - ret = panthor_vm_unmap_pages(vm, unmap_start, unmap_range);
> + /*
> + * ARM IOMMU page table management code disallows partial unmaps of huge pages,
> + * so when a partial unmap is requested, we must first unmap the entire huge
> + * page and then remap the difference between the huge page and the requested
> + * unmap region. Calculating the right offsets and ranges for the different unmap
> + * and map operations is the responsibility of the following function.
> + */
> + params = get_map_unmap_intervals(&op->remap, unmap_vma);
> +
> + ret = panthor_vm_unmap_pages(vm, params.unmap_start, params.unmap_range);
> if (ret)
> return ret;
>
> if (op->remap.prev) {
> + ret = panthor_vm_unmap_pages(vm, params.prev_unmap_start,
> + params.prev_unmap_range);
This should be part of the previous unmap.
> + if (ret)
> + return ret;
> + ret = panthor_vm_map_pages(vm, params.prev_remap_start,
> + flags_to_prot(unmap_vma->flags),
> + to_drm_gem_shmem_obj(op->remap.prev->gem.obj)->sgt,
> + op->remap.prev->gem.offset, params.prev_remap_range);
> + if (ret)
> + return ret;
> +
> prev_vma = panthor_vm_op_ctx_get_vma(op_ctx);
> panthor_vma_init(prev_vma, unmap_vma->flags);
> }
>
> if (op->remap.next) {
> + ret = panthor_vm_unmap_pages(vm, params.next_unmap_start,
> + params.next_unmap_range);
This one too.
> + if (ret)
> + return ret;
> +
> + ret = panthor_vm_map_pages(vm, params.next_remap_start,
> + flags_to_prot(unmap_vma->flags),
> + to_drm_gem_shmem_obj(op->remap.next->gem.obj)->sgt,
> + op->remap.next->gem.offset, params.next_remap_range);
> + if (ret)
> + return ret;
> +
> next_vma = panthor_vm_op_ctx_get_vma(op_ctx);
> panthor_vma_init(next_vma, unmap_vma->flags);
> }
Overall, it feels more complicated than what I had in mind (see
below, only compile-tested though).
Cheers,
Boris
--->8---
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 6dec4354e378..15718241fd2f 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -2093,27 +2093,104 @@ static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
return 0;
}
+static void
+align_unmap_range(const struct drm_gpuva_op_remap *op,
+ u64 *unmap_start, u64 *unmap_end)
+{
+ u64 aligned_unmap_start = ALIGN_DOWN(*unmap_start, SZ_2M);
+ u64 aligned_unmap_end = ALIGN(*unmap_end, SZ_2M);
+
+ /* If we're dealing with a huge page, make sure the unmap region is
+ * aligned on the start of the page.
+ */
+ if (op->prev && aligned_unmap_start < *unmap_start &&
+ op->prev->va.addr <= aligned_unmap_start) {
+ struct panthor_gem_object *bo = to_panthor_bo(op->prev->gem.obj);
+ u64 bo_offset = op->prev->gem.offset + *unmap_start -
+ op->prev->va.addr;
+ const struct page *pg = bo->base.pages[bo_offset >> PAGE_SHIFT];
+
+ if (folio_size(page_folio(pg)) >= SZ_2M)
+ *unmap_start = aligned_unmap_start;
+ }
+
+ /* If we're dealing with a huge page, make sure the unmap region is
+ * aligned on the end of the page.
+ */
+ if (op->next && aligned_unmap_end > *unmap_end &&
+ op->next->va.addr + op->next->va.range >= aligned_unmap_end) {
+ struct panthor_gem_object *bo = to_panthor_bo(op->next->gem.obj);
+ u64 bo_offset = op->next->gem.offset + op->next->va.addr +
+ op->next->va.range - *unmap_end;
+ const struct page *pg = bo->base.pages[bo_offset >> PAGE_SHIFT];
+
+ if (folio_size(page_folio(pg)) >= SZ_2M)
+ *unmap_end = aligned_unmap_end;
+ }
+}
+
static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
void *priv)
{
struct panthor_vma *unmap_vma = container_of(op->remap.unmap->va, struct panthor_vma, base);
+ u64 unmap_start, unmap_range, unmap_end, aligned_unmap_start, aligned_unmap_end;
struct panthor_vm *vm = priv;
struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
struct panthor_vma *prev_vma = NULL, *next_vma = NULL;
- u64 unmap_start, unmap_range;
int ret;
+ /*
+ * ARM IOMMU page table management code disallows partial unmaps of huge pages,
+ * so when a partial unmap is requested, we must first unmap the entire huge
+ * page and then remap the difference between the huge page and the requested
+ * unmap region. Calculating the right offsets and ranges for the different unmap
+ * and map operations is the responsibility of the following function.
+ */
drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
- ret = panthor_vm_unmap_pages(vm, unmap_start, unmap_range);
+ unmap_end = unmap_start + unmap_range;
+ aligned_unmap_start = unmap_start;
+ aligned_unmap_end = unmap_end;
+ align_unmap_range(&op->remap, &aligned_unmap_start, &aligned_unmap_end);
+
+ ret = panthor_vm_unmap_pages(vm, aligned_unmap_start,
+ aligned_unmap_end - aligned_unmap_start);
if (ret)
return ret;
if (op->remap.prev) {
+ if (aligned_unmap_start < unmap_start) {
+ struct panthor_gem_object *bo =
+ to_panthor_bo(op->remap.prev->gem.obj);
+ u64 bo_offset = op->remap.prev->gem.offset +
+ aligned_unmap_start - op->remap.prev->va.addr;
+
+ ret = panthor_vm_map_pages(vm, aligned_unmap_start,
+ flags_to_prot(unmap_vma->flags),
+ bo->base.sgt, bo_offset,
+ unmap_start - aligned_unmap_start);
+ if (ret)
+ return ret;
+ }
+
prev_vma = panthor_vm_op_ctx_get_vma(op_ctx);
panthor_vma_init(prev_vma, unmap_vma->flags);
}
if (op->remap.next) {
+ if (aligned_unmap_end > unmap_end) {
+ struct panthor_gem_object *bo =
+ to_panthor_bo(op->remap.next->gem.obj);
+ u64 bo_offset = op->remap.next->gem.offset + unmap_end -
+ op->remap.next->va.addr;
+
+ ret = panthor_vm_map_pages(vm, unmap_end,
+ flags_to_prot(unmap_vma->flags),
+ bo->base.sgt, bo_offset,
+ aligned_unmap_end - unmap_end);
+ if (ret)
+ return ret;
+ }
+
next_vma = panthor_vm_op_ctx_get_vma(op_ctx);
panthor_vma_init(next_vma, unmap_vma->flags);
}