[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251015153018.43735-3-loic.molinari@collabora.com>
Date: Wed, 15 Oct 2025 17:30:06 +0200
From: Loïc Molinari <loic.molinari@...labora.com>
To: Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Maxime Ripard <mripard@...nel.org>,
Thomas Zimmermann <tzimmermann@...e.de>,
David Airlie <airlied@...il.com>,
Simona Vetter <simona@...ll.ch>,
Jani Nikula <jani.nikula@...ux.intel.com>,
Joonas Lahtinen <joonas.lahtinen@...ux.intel.com>,
Rodrigo Vivi <rodrigo.vivi@...el.com>,
Tvrtko Ursulin <tursulin@...ulin.net>,
Boris Brezillon <boris.brezillon@...labora.com>,
Rob Herring <robh@...nel.org>,
Steven Price <steven.price@....com>,
Liviu Dudau <liviu.dudau@....com>,
Melissa Wen <mwen@...lia.com>,
Maíra Canal <mcanal@...lia.com>,
Hugh Dickins <hughd@...gle.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Loïc Molinari <loic.molinari@...labora.com>,
Al Viro <viro@...iv.linux.org.uk>,
Mikołaj Wasiak <mikolaj.wasiak@...el.com>,
Christian Brauner <brauner@...nel.org>,
Nitin Gote <nitin.r.gote@...el.com>,
Andi Shyti <andi.shyti@...ux.intel.com>,
Jonathan Corbet <corbet@....net>,
Christopher Healy <healych@...zon.com>,
Matthew Wilcox <willy@...radead.org>,
Bagas Sanjaya <bagasdotme@...il.com>
Cc: linux-kernel@...r.kernel.org,
dri-devel@...ts.freedesktop.org,
intel-gfx@...ts.freedesktop.org,
linux-mm@...ck.org,
linux-doc@...r.kernel.org,
kernel@...labora.com
Subject: [PATCH v4 02/13] drm/shmem-helper: Implement map_pages fault-around handler
This gives the mm subsystem the ability to increase fault handling
performance by letting it request the insertion of a whole range of
pages around the faulting address in a single batch.
v4:
- Implement map_pages instead of huge_fault
Signed-off-by: Loïc Molinari <loic.molinari@...labora.com>
---
drivers/gpu/drm/drm_gem_shmem_helper.c | 53 +++++++++++++++++++++++---
1 file changed, 48 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index be89be1c804c..e151262332f9 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -567,24 +567,66 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
-static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
+/*
+ * Check whether a fault at page offset @pgoff into @obj can be serviced.
+ * The fault is valid only when the backing pages array is present, the
+ * offset lies within the object's size, and the object has not been
+ * marked as purged (madv < 0).
+ *
+ * Both callers in this file hold the object's reservation lock around
+ * this call, since it inspects shmem->pages and shmem->madv.
+ */
+static bool drm_gem_shmem_fault_is_valid(struct drm_gem_object *obj,
+ pgoff_t pgoff)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
+ pgoff >= (obj->size >> PAGE_SHIFT) ||
+ shmem->madv < 0)
+ return false;
+
+ return true;
+}
+
+/*
+ * Fault-around handler (vm_operations_struct::map_pages): insert the
+ * already-allocated backing pages for the range [@start_pgoff, @end_pgoff]
+ * in one batch instead of taking a separate fault per page.
+ */
+static vm_fault_t drm_gem_shmem_map_pages(struct vm_fault *vmf,
+ pgoff_t start_pgoff,
+ pgoff_t end_pgoff)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- loff_t num_pages = obj->size >> PAGE_SHIFT;
+ unsigned long addr, pfn;
vm_fault_t ret;
+
+ /* Strip the fake mmap offset to get page indices into the object. */
+ start_pgoff -= vma->vm_pgoff;
+ end_pgoff -= vma->vm_pgoff;
+ addr = vma->vm_start + (start_pgoff << PAGE_SHIFT);
+
+ dma_resv_lock(shmem->base.resv, NULL);
+
+ /*
+ * NOTE(review): only start_pgoff is range-checked here; end_pgoff is
+ * assumed to also lie within the object — confirm the mmap path bounds
+ * the VMA to the object size.
+ */
+ if (unlikely(!drm_gem_shmem_fault_is_valid(obj, start_pgoff))) {
+ ret = VM_FAULT_SIGBUS;
+ } else {
+ /*
+ * Map a range of pages around the faulting address, stopping
+ * early at the first insertion that does not report
+ * VM_FAULT_NOPAGE (i.e. success).
+ */
+ do {
+ pfn = page_to_pfn(shmem->pages[start_pgoff]);
+ ret = vmf_insert_pfn(vma, addr, pfn);
+ addr += PAGE_SIZE;
+ } while (++start_pgoff <= end_pgoff && ret == VM_FAULT_NOPAGE);
+ }
+
+ dma_resv_unlock(shmem->base.resv);
+
+ return ret;
+}
+
+static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
struct page *page;
pgoff_t page_offset;
+ vm_fault_t ret;
/* Offset to faulty address in the VMA (without the fake offset). */
page_offset = vmf->pgoff - vma->vm_pgoff;
dma_resv_lock(shmem->base.resv, NULL);
- if (page_offset >= num_pages ||
- drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
- shmem->madv < 0) {
+ if (unlikely(!drm_gem_shmem_fault_is_valid(obj, page_offset))) {
ret = VM_FAULT_SIGBUS;
} else {
page = shmem->pages[page_offset];
@@ -632,6 +674,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
}
const struct vm_operations_struct drm_gem_shmem_vm_ops = {
+ .map_pages = drm_gem_shmem_map_pages,
.fault = drm_gem_shmem_fault,
.open = drm_gem_shmem_vm_open,
.close = drm_gem_shmem_vm_close,
--
2.47.3
Powered by blists - more mailing lists