Message-ID: <20250305152555.318159-3-ryasuoka@redhat.com>
Date: Thu,  6 Mar 2025 00:25:54 +0900
From: Ryosuke Yasuoka <ryasuoka@...hat.com>
To: maarten.lankhorst@...ux.intel.com,
	mripard@...nel.org,
	tzimmermann@...e.de,
	airlied@...il.com,
	simona@...ll.ch,
	kraxel@...hat.com,
	gurchetansingh@...omium.org,
	olvaffe@...il.com,
	akpm@...ux-foundation.org,
	urezki@...il.com,
	hch@...radead.org,
	dmitry.osipenko@...labora.com,
	jfalempe@...hat.com
Cc: Ryosuke Yasuoka <ryasuoka@...hat.com>,
	dri-devel@...ts.freedesktop.org,
	linux-kernel@...r.kernel.org,
	virtualization@...ts.linux.dev,
	linux-mm@...ck.org
Subject: [PATCH drm-next 2/2] drm/virtio: Use atomic_vmap to make drm_panic work in GUI

virtio's drm_panic support only handles shmem BOs that are already
vmapped, because there was no way to vmap from the panic handler's
atomic context. Now that atomic_vmap() is available, drm_panic can map
the buffer on the fly when it has no virtual address yet.
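
For reference, the intended call pattern from a driver's
get_scanout_buffer hook looks roughly like this (illustrative sketch
only, with error handling trimmed; the virtio hunk below is the real
user):

	struct iosys_map map;
	int ret;

	if (!shmem->vaddr) {
		/* Panic context: cannot sleep, use the atomic variant */
		ret = drm_gem_shmem_atomic_vmap(shmem, &map);
		if (ret)
			return ret;
	} else {
		/* Already vmapped: reuse the existing mapping */
		iosys_map_set_vaddr(&map, shmem->vaddr);
	}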

Signed-off-by: Ryosuke Yasuoka <ryasuoka@...hat.com>
---
 drivers/gpu/drm/drm_gem.c              | 53 ++++++++++++++++++++++++++
 drivers/gpu/drm/drm_gem_shmem_helper.c | 51 ++++++++++++++++++++++++++
 drivers/gpu/drm/virtio/virtgpu_plane.c | 14 +++++--
 include/drm/drm_gem.h                  |  1 +
 include/drm/drm_gem_shmem_helper.h     |  2 +
 5 files changed, 118 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ee811764c3df..eebfaef3a52e 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -535,6 +535,59 @@ static void drm_gem_check_release_batch(struct folio_batch *fbatch)
 	cond_resched();
 }
 
+struct page **drm_gem_atomic_get_pages(struct drm_gem_object *obj)
+{
+	struct address_space *mapping;
+	struct page **pages;
+	struct folio *folio;
+	long i, j, npages;
+
+	if (WARN_ON(!obj->filp))
+		return ERR_PTR(-EINVAL);
+
+	/* This is the shared memory object that backs the GEM resource */
+	mapping = obj->filp->f_mapping;
+
+	/* We already BUG_ON() for non-page-aligned sizes in
+	 * drm_gem_object_init(), so we should never hit this unless
+	 * driver author is doing something really wrong:
+	 */
+	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+	npages = obj->size >> PAGE_SHIFT;
+
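+	/* GFP_ATOMIC: this can run from the panic handler and must not sleep */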
+	pages = kmalloc_array(npages, sizeof(struct page *), GFP_ATOMIC);
+	if (pages == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	mapping_set_unevictable(mapping);
+
+	i = 0;
+	while (i < npages) {
+		long nr;
+
+		folio = shmem_read_folio_gfp(mapping, i, GFP_ATOMIC);
+		if (IS_ERR(folio)) {
+			kfree(pages);
+			return ERR_CAST(folio);
+		}
+		nr = min(npages - i, folio_nr_pages(folio));
+		for (j = 0; j < nr; j++, i++)
+			pages[i] = folio_file_page(folio, i);
+
+		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
+		 * correct region during swapin. Note that this requires
+		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
+		 * so shmem can relocate pages during swapin if required.
+		 */
+		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
+				(folio_pfn(folio) >= 0x00100000UL));
+	}
+
+	return pages;
+}
+
 /**
  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
  * from shmem
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 5ab351409312..789dfd726a36 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -186,6 +186,34 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
 
+static int drm_gem_shmem_atomic_get_pages(struct drm_gem_shmem_object *shmem)
+{
+	struct drm_gem_object *obj = &shmem->base;
+	struct page **pages;
+
+	pages = drm_gem_atomic_get_pages(obj);
+	if (IS_ERR(pages)) {
+		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
+			    PTR_ERR(pages));
+		shmem->pages_use_count = 0;
+		return PTR_ERR(pages);
+	}
+
+	/*
+	 * TODO: Allocating WC pages which are correctly flushed is only
+	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
+	 * ttm_pool.c could use.
+	 */
+#ifdef CONFIG_X86
+	if (shmem->map_wc)
+		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
+#endif
+
+	shmem->pages = pages;
+
+	return 0;
+}
+
 static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
@@ -317,6 +345,29 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
 }
 EXPORT_SYMBOL(drm_gem_shmem_unpin);
 
+int drm_gem_shmem_atomic_vmap(struct drm_gem_shmem_object *shmem,
+			      struct iosys_map *map)
+{
+	struct drm_gem_object *obj = &shmem->base;
+	pgprot_t prot = PAGE_KERNEL;
+	int ret;
+
+	ret = drm_gem_shmem_atomic_get_pages(shmem);
+	if (ret)
+		return ret;
+
+	if (shmem->map_wc)
+		prot = pgprot_writecombine(prot);
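+	/* atomic_vmap(), added in patch 1/2, maps the pages without sleeping */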
+	shmem->vaddr = atomic_vmap(shmem->pages, obj->size >> PAGE_SHIFT,
+				   VM_MAP, prot);
+	if (!shmem->vaddr)
+		return -ENOMEM;
+	iosys_map_set_vaddr(map, shmem->vaddr);
+
+	return 0;
+}
+
 /*
  * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
  * @shmem: shmem GEM object
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a6f5a78f436a..2a977c5cf42a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -500,11 +500,19 @@ static int virtio_drm_get_scanout_buffer(struct drm_plane *plane,
 
 	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
 
-	/* Only support mapped shmem bo */
-	if (virtio_gpu_is_vram(bo) || bo->base.base.import_attach || !bo->base.vaddr)
+	if (virtio_gpu_is_vram(bo) || bo->base.base.import_attach)
 		return -ENODEV;
 
-	iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+	/* Not vmapped yet: try to map it atomically from the panic path */
+	if (!bo->base.vaddr) {
+		int ret;
+
+		ret = drm_gem_shmem_atomic_vmap(&bo->base, &sb->map[0]);
+		if (ret)
+			return ret;
+	} else {
+		iosys_map_set_vaddr(&sb->map[0], bo->base.vaddr);
+	}
 
 	sb->format = plane->state->fb->format;
 	sb->height = plane->state->fb->height;
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index fdae947682cd..cfed66bc12ef 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -529,6 +529,7 @@ void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
 int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
 int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
 
+struct page **drm_gem_atomic_get_pages(struct drm_gem_object *obj);
 struct page **drm_gem_get_pages(struct drm_gem_object *obj);
 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 		bool dirty, bool accessed);
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index d22e3fb53631..86a357945f42 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -105,6 +105,8 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
 void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
 int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
 void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
+int drm_gem_shmem_atomic_vmap(struct drm_gem_shmem_object *shmem,
+			      struct iosys_map *map);
 int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
 		       struct iosys_map *map);
 void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
-- 
2.48.1

