Message-Id: <20190716174331.7371-3-robdclark@gmail.com>
Date:   Tue, 16 Jul 2019 10:43:23 -0700
From:   Rob Clark <robdclark@...il.com>
To:     dri-devel@...ts.freedesktop.org
Cc:     Chris Wilson <chris@...is-wilson.co.uk>,
        Intel Graphics Development <intel-gfx@...ts.freedesktop.org>,
        Rob Clark <robdclark@...omium.org>,
        David Airlie <airlied@...ux.ie>,
        Daniel Vetter <daniel@...ll.ch>,
        Emil Velikov <emil.velikov@...labora.com>,
        Deepak Sharma <deepak.sharma@....com>,
        Imre Deak <imre.deak@...el.com>,
        Thomas Zimmermann <tzimmermann@...e.de>,
        Eric Biggers <ebiggers@...gle.com>,
        linux-kernel@...r.kernel.org
Subject: [PATCH v2 3/3] drm/vgem: use normal cached mmap'ings

From: Rob Clark <robdclark@...omium.org>

Since there is no real device associated with VGEM, it is impossible to
end up with appropriate dev->dma_ops, meaning that we have no way to
invalidate the shmem pages allocated by VGEM.  So, at least on platforms
without drm_clflush_pages(), we end up with corruption when cache lines
from previous usage of VGEM bo pages get evicted to memory.

The only sane option is to use cached mappings.
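
[Editorial illustration, not part of the patch: the practical difference
between the two mapping types is the page protection applied to the vma.
A write-combined mmap bypasses the CPU cache via pgprot_writecombine(),
while a cached mmap keeps the default protection and must handle device
coherency explicitly. A minimal sketch of the two variants; the
example_mmap_* helpers are hypothetical, not kernel API:]

static int example_mmap_wc(struct vm_area_struct *vma)
{
	/* Write-combined: CPU-uncached, so no dirty cache lines can
	 * later be evicted over the buffer contents. */
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	return 0;
}

static int example_mmap_cached(struct vm_area_struct *vma)
{
	/* Cached: the default protection; device coherency must then be
	 * handled explicitly, e.g. via dma_sync_*() as this patch does. */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	return 0;
}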

Signed-off-by: Rob Clark <robdclark@...omium.org>
---
 drivers/gpu/drm/vgem/vgem_drv.c | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index a179e962b79e..b6071a466b92 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -259,9 +259,6 @@ static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
 	if (ret)
 		return ret;
 
-	/* Keep the WC mmaping set by drm_gem_mmap() but our pages
-	 * are ordinary and not special.
-	 */
 	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
 	return 0;
 }
@@ -310,17 +307,17 @@ static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
 static int vgem_prime_pin(struct drm_gem_object *obj, struct device *dev)
 {
 	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-	long n_pages = obj->size >> PAGE_SHIFT;
+	long i, n_pages = obj->size >> PAGE_SHIFT;
 	struct page **pages;
 
 	pages = vgem_pin_pages(bo);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
-	/* Flush the object from the CPU cache so that importers can rely
-	 * on coherent indirect access via the exported dma-address.
-	 */
-	drm_clflush_pages(pages, n_pages);
+	for (i = 0; i < n_pages; i++) {
+		dma_sync_single_for_device(dev, page_to_phys(pages[i]),
+					   PAGE_SIZE, DMA_BIDIRECTIONAL);
+	}
 
 	return 0;
 }
@@ -328,6 +325,13 @@ static int vgem_prime_pin(struct drm_gem_object *obj, struct device *dev)
 static void vgem_prime_unpin(struct drm_gem_object *obj, struct device *dev)
 {
 	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
+	long i, n_pages = obj->size >> PAGE_SHIFT;
+	struct page **pages = bo->pages;
+
+	for (i = 0; i < n_pages; i++) {
+		dma_sync_single_for_cpu(dev, page_to_phys(pages[i]),
+					PAGE_SIZE, DMA_BIDIRECTIONAL);
+	}
 
 	vgem_unpin_pages(bo);
 }
@@ -382,7 +386,7 @@ static void *vgem_prime_vmap(struct drm_gem_object *obj)
 	if (IS_ERR(pages))
 		return NULL;
 
-	return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
+	return vmap(pages, n_pages, 0, PAGE_KERNEL);
 }
 
 static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
@@ -411,7 +415,7 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
 	fput(vma->vm_file);
 	vma->vm_file = get_file(obj->filp);
 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
 	return 0;
 }
-- 
2.21.0
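
[Editorial usage note, not part of the patch: with cached mmap'ings, CPU
access to an exported dma-buf is no longer implicitly coherent, so
userspace is expected to bracket CPU access with the standard
DMA_BUF_IOCTL_SYNC ioctl. A minimal sketch, assuming dmabuf_fd is a
dma-buf exported from a vgem bo; the actual read/write of the mapping is
elided:]

#include <linux/dma-buf.h>
#include <sys/ioctl.h>

static void cpu_access(int dmabuf_fd)
{
	struct dma_buf_sync sync;

	/* Tell the kernel CPU access is starting so caches can be synced. */
	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);

	/* ... read/write the cached mapping here ... */

	/* Signal end of CPU access so writes are flushed for the device. */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
}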
