Message-ID: <20251030230357.45070-3-mohamedahmedegypt2001@gmail.com>
Date: Fri, 31 Oct 2025 01:03:54 +0200
From: Mohamed Ahmed <mohamedahmedegypt2001@...il.com>
To: linux-kernel@...r.kernel.org
Cc: dri-devel@...ts.freedesktop.org,
Mary Guillemard <mary@...y.zone>,
Faith Ekstrand <faith.ekstrand@...labora.com>,
Lyude Paul <lyude@...hat.com>,
Danilo Krummrich <dakr@...nel.org>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Maxime Ripard <mripard@...nel.org>,
Thomas Zimmermann <tzimmermann@...e.de>,
David Airlie <airlied@...il.com>,
Simona Vetter <simona@...ll.ch>,
nouveau@...ts.freedesktop.org,
Mohamed Ahmed <mohamedahmedegypt2001@...il.com>
Subject: [PATCH v3 2/5] drm/nouveau/uvmm: Allow larger pages
From: Mary Guillemard <mary@...y.zone>

Now that everything in UVMM knows about the variable page shift, we can
select larger values.

The proposed approach relies on nouveau_bo::page unless it would cause
alignment issues, in which case we fall back to searching for an
appropriate shift. For example, a VA that is 4 KiB-aligned but not
64 KiB-aligned cannot be mapped with 64 KiB pages even if the BO
prefers them, so a smaller shift has to be selected.

Signed-off-by: Mary Guillemard <mary@...y.zone>
Co-developed-by: Mohamed Ahmed <mohamedahmedegypt2001@...il.com>
Signed-off-by: Mohamed Ahmed <mohamedahmedegypt2001@...il.com>
---
drivers/gpu/drm/nouveau/nouveau_uvmm.c | 60 +++++++++++++++++++++++++-
1 file changed, 58 insertions(+), 2 deletions(-)
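
For reviewers who want to exercise the selection logic outside the
kernel, here is a minimal standalone sketch of the same idea. It is an
illustration only: page_desc, pick_shift, and the example shift table
(4 KiB / 64 KiB / 2 MiB / 512 MiB) are hypothetical stand-ins for the
nvif_vmm page array and are not the driver's actual types or values.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HOST_PAGE_SHIFT 12	/* assume 4 KiB CPU pages */

/* Hypothetical stand-in for the nvif_vmm page table: a shift plus the
 * memory domains (VRAM/host) that support it, largest first.
 */
struct page_desc {
	uint8_t shift;
	bool vram;
	bool host;
};

static const struct page_desc page_table[] = {
	{ 29, true,  false },	/* 512 MiB, VRAM only */
	{ 21, true,  true  },	/* 2 MiB */
	{ 16, true,  true  },	/* 64 KiB */
	{ 12, true,  true  },	/* 4 KiB */
};

/* Same predicate as op_map_aligned_to_page_shift(): VA, range and GEM
 * offset must all be multiples of 1ULL << shift.
 */
static bool aligned_to(uint64_t addr, uint64_t range, uint64_t offset,
		       uint8_t shift)
{
	uint64_t mask = (1ULL << shift) - 1;

	return !(addr & mask) && !(range & mask) && !(offset & mask);
}

/* Mirrors the select_page_shift() flow: prefer the BO's page shift and
 * fall back to the first smaller shift the domain supports that fits.
 */
static uint8_t pick_shift(uint64_t addr, uint64_t range, uint64_t offset,
			  uint8_t bo_shift, bool is_vram)
{
	size_t i;

	if (aligned_to(addr, range, offset, bo_shift))
		return bo_shift;

	for (i = 0; i < sizeof(page_table) / sizeof(page_table[0]); i++) {
		if (page_table[i].shift >= bo_shift)
			continue;
		if (is_vram && !page_table[i].vram)
			continue;
		if (!is_vram && (!page_table[i].host ||
				 page_table[i].shift > HOST_PAGE_SHIFT))
			continue;
		if (aligned_to(addr, range, offset, page_table[i].shift))
			return page_table[i].shift;
	}

	/* Should be unreachable, matching the WARN_ON() in the patch. */
	return HOST_PAGE_SHIFT;
}

int main(void)
{
	/* 64 KiB-aligned VRAM bind with a 64 KiB BO preference: keeps 16. */
	printf("%d\n", pick_shift(0x200000, 0x20000, 0, 16, true));
	/* Same BO bound at 4 KiB granularity: downgrades to 12. */
	printf("%d\n", pick_shift(0x201000, 0x20000, 0, 16, true));
	return 0;
}

Built with any C99 toolchain, this prints 16 for the aligned bind and
12 for the downgraded one, mirroring what select_page_shift() does in
the hunks below.
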
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 2cd0835b05e8..f2d032f665e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -454,6 +454,62 @@ op_unmap_prepare_unwind(struct drm_gpuva *va)
drm_gpuva_insert(va->vm, va);
}
+static bool
+op_map_aligned_to_page_shift(const struct drm_gpuva_op_map *op, u8 page_shift)
+{
+ u64 non_page_bits = (1ULL << page_shift) - 1;
+
+ return !(op->va.addr & non_page_bits) &&
+        !(op->va.range & non_page_bits) &&
+        !(op->gem.offset & non_page_bits);
+}
+
+static u8
+select_page_shift(struct nouveau_uvmm *uvmm, struct drm_gpuva_op_map *op)
+{
+ struct nouveau_bo *nvbo = nouveau_gem_object(op->gem.obj);
+
+ /* nouveau_bo_fixup_align() guarantees allocations are aligned to the page
+ * size in most cases, but it can't handle cases where userspace allocates
+ * with one size and then binds with a smaller granularity. So in order to avoid
+ * breaking old userspace, we need to ensure that the VA is actually
+ * aligned before using it, and if it isn't, then we downgrade to the first
+ * granularity that will fit, which is optimal from a correctness and
+ * performance perspective.
+ */
+ if (op_map_aligned_to_page_shift(op, nvbo->page))
+ return nvbo->page;
+
+ struct nouveau_mem *mem = nouveau_mem(nvbo->bo.resource);
+ struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+ int i;
+
+ /* If the given granularity doesn't fit, let's find one that will fit. */
+ for (i = 0; i < vmm->page_nr; i++) {
+ /* Ignore anything bigger than or equal to the BO preference. */
+ if (vmm->page[i].shift >= nvbo->page)
+ continue;
+
+ /* Skip incompatible domains. */
+ if ((mem->mem.type & NVIF_MEM_VRAM) && !vmm->page[i].vram)
+ continue;
+ if ((mem->mem.type & NVIF_MEM_HOST) &&
+ (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+ continue;
+
+ /* If it fits, return the proposed shift. */
+ if (op_map_aligned_to_page_shift(op, vmm->page[i].shift))
+ return vmm->page[i].shift;
+ }
+
+ /* If we get here then nothing can reconcile the requirements. This should never
+ * happen.
+ */
+ WARN_ON(1);
+
+ return PAGE_SHIFT;
+}
+
static void
nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_prealloc *new,
@@ -506,7 +562,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
if (vmm_get_range)
nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
vmm_get_range,
- PAGE_SHIFT);
+ select_page_shift(uvmm, &op->map));
break;
}
case DRM_GPUVA_OP_REMAP: {
@@ -599,7 +655,7 @@ op_map_prepare(struct nouveau_uvmm *uvmm,
uvma->region = args->region;
uvma->kind = args->kind;
- uvma->page_shift = PAGE_SHIFT;
+ uvma->page_shift = select_page_shift(uvmm, op);
drm_gpuva_map(&uvmm->base, &uvma->va, op);
--
2.51.1