Message-ID: <20251021113049.17242-3-loic.molinari@collabora.com>
Date: Tue, 21 Oct 2025 13:30:39 +0200
From: Loïc Molinari <loic.molinari@...labora.com>
To: Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
	Maxime Ripard <mripard@...nel.org>,
	Thomas Zimmermann <tzimmermann@...e.de>,
	David Airlie <airlied@...il.com>,
	Simona Vetter <simona@...ll.ch>,
	Jani Nikula <jani.nikula@...ux.intel.com>,
	Joonas Lahtinen <joonas.lahtinen@...ux.intel.com>,
	Rodrigo Vivi <rodrigo.vivi@...el.com>,
	Tvrtko Ursulin <tursulin@...ulin.net>,
	Boris Brezillon <boris.brezillon@...labora.com>,
	Rob Herring <robh@...nel.org>,
	Steven Price <steven.price@....com>,
	Liviu Dudau <liviu.dudau@....com>,
	Melissa Wen <mwen@...lia.com>,
	Maíra Canal <mcanal@...lia.com>,
	Hugh Dickins <hughd@...gle.com>,
	Baolin Wang <baolin.wang@...ux.alibaba.com>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Loïc Molinari <loic.molinari@...labora.com>,
	Al Viro <viro@...iv.linux.org.uk>,
	Mikołaj Wasiak <mikolaj.wasiak@...el.com>,
	Christian Brauner <brauner@...nel.org>,
	Nitin Gote <nitin.r.gote@...el.com>,
	Andi Shyti <andi.shyti@...ux.intel.com>,
	Jonathan Corbet <corbet@....net>,
	Christopher Healy <healych@...zon.com>,
	Matthew Wilcox <willy@...radead.org>,
	Bagas Sanjaya <bagasdotme@...il.com>
Cc: linux-kernel@...r.kernel.org,
	dri-devel@...ts.freedesktop.org,
	intel-gfx@...ts.freedesktop.org,
	linux-mm@...ck.org,
	linux-doc@...r.kernel.org,
	kernel@...labora.com
Subject: [PATCH v5 02/12] drm/shmem-helper: Implement map_pages fault-around handler

This lets the mm subsystem improve fault handling performance: on a
fault, the core proposes a range of pages around the faulting address
and the handler inserts them in a single batch.
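
For context, the caller side lives in the mm core's fault-around path
(do_fault_around() in mm/memory.c). A rough sketch of that contract,
with an approximate window size and clamping (not the exact upstream
logic), looks like this:

	/* Rough sketch of the ->map_pages() caller, loosely based on
	 * do_fault_around() in mm/memory.c; the window size and the
	 * clamping shown here are approximations.
	 */
	static vm_fault_t do_fault_around_sketch(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;
		pgoff_t nr = 16;	/* tunable via fault_around_bytes */
		pgoff_t start_pgoff = vmf->pgoff;
		pgoff_t end_pgoff;

		/* Widen backwards, but not before the VMA start. */
		start_pgoff -= min(start_pgoff - vma->vm_pgoff, nr / 2);

		/* Widen forwards, but not past the VMA end. */
		end_pgoff = min(start_pgoff + nr - 1,
				(pgoff_t)(vma->vm_pgoff + vma_pages(vma)) - 1);

		/* Offsets are file-based (they still include the GEM fake
		 * offset), hence the vm_pgoff adjustments in the handler.
		 */
		return vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
	}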

v4:
- implement map_pages instead of huge_fault

v5:
- improve patch series progression
- use dma_resv_trylock() in map_pages (many thanks to Matthew Wilcox)
- validate map_pages range based on end_pgoff instead of start_pgoff

Signed-off-by: Loïc Molinari <loic.molinari@...labora.com>
---
 drivers/gpu/drm/drm_gem_shmem_helper.c | 72 ++++++++++++++++++++++----
 1 file changed, 62 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index be89be1c804c..2a9fbc9c3712 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -567,31 +567,82 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
 
-static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
+static bool drm_gem_shmem_fault_is_valid(struct drm_gem_object *obj,
+					 pgoff_t pgoff)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
+	    pgoff >= (obj->size >> PAGE_SHIFT) ||
+	    shmem->madv < 0)
+		return false;
+
+	return true;
+}
+
+static vm_fault_t drm_gem_shmem_map_pages(struct vm_fault *vmf,
+					  pgoff_t start_pgoff,
+					  pgoff_t end_pgoff)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
-	loff_t num_pages = obj->size >> PAGE_SHIFT;
+	struct page **pages = shmem->pages;
+	unsigned long addr, pfn;
 	vm_fault_t ret;
-	struct page *page;
+
+	start_pgoff -= vma->vm_pgoff;
+	end_pgoff -= vma->vm_pgoff;
+	addr = vma->vm_start + (start_pgoff << PAGE_SHIFT);
+
+	/* map_pages is called with the RCU read lock held (sleeping isn't
+	 * allowed), so just fall through to the heavier-weight fault path.
+	 */
+	if (unlikely(!dma_resv_trylock(shmem->base.resv)))
+		return 0;
+
+	if (unlikely(!drm_gem_shmem_fault_is_valid(obj, end_pgoff))) {
+		ret = VM_FAULT_SIGBUS;
+		goto out;
+	}
+
+	/* Map a range of pages around the faulty address. */
+	do {
+		pfn = page_to_pfn(pages[start_pgoff]);
+		ret = vmf_insert_pfn(vma, addr, pfn);
+		addr += PAGE_SIZE;
+	} while (++start_pgoff <= end_pgoff && ret == VM_FAULT_NOPAGE);
+
+ out:
+	dma_resv_unlock(shmem->base.resv);
+
+	return ret;
+}
+
+static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+	struct page **pages = shmem->pages;
 	pgoff_t page_offset;
+	unsigned long pfn;
+	vm_fault_t ret;
 
 	/* Offset to faulty address in the VMA (without the fake offset). */
 	page_offset = vmf->pgoff - vma->vm_pgoff;
 
 	dma_resv_lock(shmem->base.resv, NULL);
 
-	if (page_offset >= num_pages ||
-	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
-	    shmem->madv < 0) {
+	if (unlikely(!drm_gem_shmem_fault_is_valid(obj, page_offset))) {
 		ret = VM_FAULT_SIGBUS;
-	} else {
-		page = shmem->pages[page_offset];
-
-		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
+		goto out;
 	}
 
+	pfn = page_to_pfn(pages[page_offset]);
+	ret = vmf_insert_pfn(vma, vmf->address, pfn);
+
+ out:
 	dma_resv_unlock(shmem->base.resv);
 
 	return ret;
@@ -632,6 +683,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
 }
 
 const struct vm_operations_struct drm_gem_shmem_vm_ops = {
+	.map_pages = drm_gem_shmem_map_pages,
 	.fault = drm_gem_shmem_fault,
 	.open = drm_gem_shmem_vm_open,
 	.close = drm_gem_shmem_vm_close,
-- 
2.47.3
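
As a side note, a hypothetical way to exercise the new path from
userspace on a shmem-backed driver is to mmap() a dumb buffer and
touch it: the first CPU access of each batch then goes through the
fault handler and, with this patch, nearby pages can be mapped
alongside it (error handling omitted for brevity):

	/* Hypothetical test snippet (not part of the patch): map a dumb
	 * buffer and write to it so faults are served by
	 * drm_gem_shmem_fault()/drm_gem_shmem_map_pages().
	 */
	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <drm/drm.h>
	#include <drm/drm_mode.h>

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR);
		struct drm_mode_create_dumb create = {
			.width = 256, .height = 256, .bpp = 32,
		};
		struct drm_mode_map_dumb map = {0};
		char *ptr;

		ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
		map.handle = create.handle;
		ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
		ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, map.offset);

		/* Each page touched here may pull in neighbours too. */
		memset(ptr, 0xa5, create.size);
		return 0;
	}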

