Message-ID: <248083d2-b8f2-a4d7-099d-70a7e7859c11@suse.de>
Date:   Mon, 18 Apr 2022 20:38:45 +0200
From:   Thomas Zimmermann <tzimmermann@...e.de>
To:     Dmitry Osipenko <dmitry.osipenko@...labora.com>,
        David Airlie <airlied@...ux.ie>,
        Gerd Hoffmann <kraxel@...hat.com>,
        Gurchetan Singh <gurchetansingh@...omium.org>,
        Chia-I Wu <olvaffe@...il.com>, Daniel Vetter <daniel@...ll.ch>,
        Daniel Almeida <daniel.almeida@...labora.com>,
        Gert Wollny <gert.wollny@...labora.com>,
        Gustavo Padovan <gustavo.padovan@...labora.com>,
        Daniel Stone <daniel@...ishbar.org>,
        Tomeu Vizoso <tomeu.vizoso@...labora.com>,
        Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
        Maxime Ripard <mripard@...nel.org>,
        Rob Herring <robh@...nel.org>,
        Steven Price <steven.price@....com>,
        Alyssa Rosenzweig <alyssa.rosenzweig@...labora.com>,
        Rob Clark <robdclark@...il.com>,
        Emil Velikov <emil.l.velikov@...il.com>,
        Robin Murphy <robin.murphy@....com>
Cc:     Dmitry Osipenko <digetx@...il.com>, linux-kernel@...r.kernel.org,
        dri-devel@...ts.freedesktop.org,
        virtualization@...ts.linux-foundation.org
Subject: Re: [PATCH v4 10/15] drm/shmem-helper: Take reservation lock instead
 of drm_gem_shmem locks

Hi

On 18.04.22 00:37, Dmitry Osipenko wrote:
> Replace drm_gem_shmem locks with the reservation lock to make GEM
> locking more consistent.
> 
> Previously, drm_gem_shmem_vmap() and drm_gem_shmem_get_pages() were
> protected by separate locks; now they share the same lock, which makes
> no difference to the current GEM SHMEM users. Only the Panfrost and
> Lima drivers use vmap(), and they do so in slow code paths, so there
> was no practical justification for a separate lock in vmap().
> 
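Summarizing the new scheme for readers of the series: every helper now
serializes on the GEM object's reservation lock instead of a
driver-private mutex. A minimal sketch of the resulting pattern
(do_work_locked() is only a placeholder for the get_pages/vmap work
done under the lock; shmem is a struct drm_gem_shmem_object as in the
hunks below):

	int ret;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret; /* interrupted by a signal */

	/* shmem->pages, shmem->vaddr and their use counts are all
	 * protected by the single reservation lock now. */
	ret = do_work_locked(shmem);

	dma_resv_unlock(shmem->base.resv);
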
> Suggested-by: Daniel Vetter <daniel@...ll.ch>
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@...labora.com>
> ---
>   drivers/gpu/drm/drm_gem_shmem_helper.c  | 38 ++++++++++++-------------
>   drivers/gpu/drm/lima/lima_gem.c         |  8 +++---
>   drivers/gpu/drm/panfrost/panfrost_mmu.c | 15 ++++++----
>   include/drm/drm_gem_shmem_helper.h      | 10 -------
>   4 files changed, 31 insertions(+), 40 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index 30ee46348a99..3ecef571eff3 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -86,8 +86,6 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
>   	if (ret)
>   		goto err_release;
>   
> -	mutex_init(&shmem->pages_lock);
> -	mutex_init(&shmem->vmap_lock);
>   	INIT_LIST_HEAD(&shmem->madv_list);
>   
>   	if (!private) {
> @@ -157,8 +155,6 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
>   	WARN_ON(shmem->pages_use_count);
>   
>   	drm_gem_object_release(obj);
> -	mutex_destroy(&shmem->pages_lock);
> -	mutex_destroy(&shmem->vmap_lock);
>   	kfree(shmem);
>   }
>   EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
> @@ -209,11 +205,11 @@ int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
>   
>   	WARN_ON(shmem->base.import_attach);
>   
> -	ret = mutex_lock_interruptible(&shmem->pages_lock);
> +	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
>   	if (ret)
>   		return ret;
>   	ret = drm_gem_shmem_get_pages_locked(shmem);
> -	mutex_unlock(&shmem->pages_lock);
> +	dma_resv_unlock(shmem->base.resv);
>   
>   	return ret;
>   }
> @@ -248,9 +244,9 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
>    */
>   void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
>   {
> -	mutex_lock(&shmem->pages_lock);
> +	dma_resv_lock(shmem->base.resv, NULL);
>   	drm_gem_shmem_put_pages_locked(shmem);
> -	mutex_unlock(&shmem->pages_lock);
> +	dma_resv_unlock(shmem->base.resv);
>   }
>   EXPORT_SYMBOL(drm_gem_shmem_put_pages);
>   
> @@ -310,7 +306,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
>   	} else {
>   		pgprot_t prot = PAGE_KERNEL;
>   
> -		ret = drm_gem_shmem_get_pages(shmem);
> +		ret = drm_gem_shmem_get_pages_locked(shmem);
>   		if (ret)
>   			goto err_zero_use;
>   
> @@ -360,11 +356,11 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
>   {
>   	int ret;
>   
> -	ret = mutex_lock_interruptible(&shmem->vmap_lock);
> +	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
>   	if (ret)
>   		return ret;
>   	ret = drm_gem_shmem_vmap_locked(shmem, map);

Within drm_gem_shmem_vmap_locked(), there's a call to dma_buf_vmap()
for imported pages. If the exporter side also holds/acquires the same
reservation lock as our object, the whole thing can deadlock. We cannot
move dma_buf_vmap() out of the critical section, because we still need
to increment the reference counter. I honestly don't know how to easily
fix this problem. There's a TODO item about replacing these locks at
[1]. Since Daniel suggested this patch, we should talk to him about the
issue.

Best regards
Thomas

[1] https://www.kernel.org/doc/html/latest/gpu/todo.html#move-buffer-object-locking-to-dma-resv-lock



> -	mutex_unlock(&shmem->vmap_lock);
> +	dma_resv_unlock(shmem->base.resv);
>   
>   	return ret;
>   }
> @@ -385,7 +381,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
>   		dma_buf_vunmap(obj->import_attach->dmabuf, map);
>   	} else {
>   		vunmap(shmem->vaddr);
> -		drm_gem_shmem_put_pages(shmem);
> +		drm_gem_shmem_put_pages_locked(shmem);
>   	}
>   
>   	shmem->vaddr = NULL;
> @@ -406,9 +402,11 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
>   void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
>   			  struct iosys_map *map)
>   {
> -	mutex_lock(&shmem->vmap_lock);
> +	dma_resv_lock(shmem->base.resv, NULL);
>   	drm_gem_shmem_vunmap_locked(shmem, map);
> -	mutex_unlock(&shmem->vmap_lock);
> +	dma_resv_unlock(shmem->base.resv);
> +
> +	drm_gem_shmem_update_purgeable_status(shmem);
>   }
>   EXPORT_SYMBOL(drm_gem_shmem_vunmap);
>   
> @@ -442,14 +440,14 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
>    */
>   int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
>   {
> -	mutex_lock(&shmem->pages_lock);
> +	dma_resv_lock(shmem->base.resv, NULL);
>   
>   	if (shmem->madv >= 0)
>   		shmem->madv = madv;
>   
>   	madv = shmem->madv;
>   
> -	mutex_unlock(&shmem->pages_lock);
> +	dma_resv_unlock(shmem->base.resv);
>   
>   	return (madv >= 0);
>   }
> @@ -487,10 +485,10 @@ EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
>   
>   bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
>   {
> -	if (!mutex_trylock(&shmem->pages_lock))
> +	if (!dma_resv_trylock(shmem->base.resv))
>   		return false;
>   	drm_gem_shmem_purge_locked(shmem);
> -	mutex_unlock(&shmem->pages_lock);
> +	dma_resv_unlock(shmem->base.resv);
>   
>   	return true;
>   }
> @@ -549,7 +547,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
>   	/* We don't use vmf->pgoff since that has the fake offset */
>   	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
>   
> -	mutex_lock(&shmem->pages_lock);
> +	dma_resv_lock(shmem->base.resv, NULL);
>   
>   	if (page_offset >= num_pages ||
>   	    WARN_ON_ONCE(!shmem->pages) ||
> @@ -561,7 +559,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
>   		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
>   	}
>   
> -	mutex_unlock(&shmem->pages_lock);
> +	dma_resv_unlock(shmem->base.resv);
>   
>   	return ret;
>   }
> diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
> index 0f1ca0b0db49..5008f0c2428f 100644
> --- a/drivers/gpu/drm/lima/lima_gem.c
> +++ b/drivers/gpu/drm/lima/lima_gem.c
> @@ -34,7 +34,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
>   
>   	new_size = min(new_size, bo->base.base.size);
>   
> -	mutex_lock(&bo->base.pages_lock);
> +	dma_resv_lock(bo->base.base.resv, NULL);
>   
>   	if (bo->base.pages) {
>   		pages = bo->base.pages;
> @@ -42,7 +42,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
>   		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
>   				       sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
>   		if (!pages) {
> -			mutex_unlock(&bo->base.pages_lock);
> +			dma_resv_unlock(bo->base.base.resv);
>   			return -ENOMEM;
>   		}
>   
> @@ -56,13 +56,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
>   		struct page *page = shmem_read_mapping_page(mapping, i);
>   
>   		if (IS_ERR(page)) {
> -			mutex_unlock(&bo->base.pages_lock);
> +			dma_resv_unlock(bo->base.base.resv);
>   			return PTR_ERR(page);
>   		}
>   		pages[i] = page;
>   	}
>   
> -	mutex_unlock(&bo->base.pages_lock);
> +	dma_resv_unlock(bo->base.base.resv);
>   
>   	ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
>   					new_size, GFP_KERNEL);
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index d3f82b26a631..404b8f67e2df 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -424,6 +424,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
>   	struct panfrost_gem_mapping *bomapping;
>   	struct panfrost_gem_object *bo;
>   	struct address_space *mapping;
> +	struct drm_gem_object *obj;
>   	pgoff_t page_offset;
>   	struct sg_table *sgt;
>   	struct page **pages;
> @@ -446,13 +447,15 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
>   	page_offset = addr >> PAGE_SHIFT;
>   	page_offset -= bomapping->mmnode.start;
>   
> -	mutex_lock(&bo->base.pages_lock);
> +	obj = &bo->base.base;
> +
> +	dma_resv_lock(obj->resv, NULL);
>   
>   	if (!bo->base.pages) {
>   		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
>   				     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
>   		if (!bo->sgts) {
> -			mutex_unlock(&bo->base.pages_lock);
> +			dma_resv_unlock(obj->resv);
>   			ret = -ENOMEM;
>   			goto err_bo;
>   		}
> @@ -462,7 +465,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
>   		if (!pages) {
>   			kvfree(bo->sgts);
>   			bo->sgts = NULL;
> -			mutex_unlock(&bo->base.pages_lock);
> +			dma_resv_unlock(obj->resv);
>   			ret = -ENOMEM;
>   			goto err_bo;
>   		}
> @@ -472,7 +475,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
>   		pages = bo->base.pages;
>   		if (pages[page_offset]) {
>   			/* Pages are already mapped, bail out. */
> -			mutex_unlock(&bo->base.pages_lock);
> +			dma_resv_unlock(obj->resv);
>   			goto out;
>   		}
>   	}
> @@ -483,13 +486,13 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
>   	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
>   		pages[i] = shmem_read_mapping_page(mapping, i);
>   		if (IS_ERR(pages[i])) {
> -			mutex_unlock(&bo->base.pages_lock);
> +			dma_resv_unlock(obj->resv);
>   			ret = PTR_ERR(pages[i]);
>   			goto err_pages;
>   		}
>   	}
>   
> -	mutex_unlock(&bo->base.pages_lock);
> +	dma_resv_unlock(obj->resv);
>   
>   	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
>   	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
> diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
> index d0a57853c188..70889533962a 100644
> --- a/include/drm/drm_gem_shmem_helper.h
> +++ b/include/drm/drm_gem_shmem_helper.h
> @@ -26,11 +26,6 @@ struct drm_gem_shmem_object {
>   	 */
>   	struct drm_gem_object base;
>   
> -	/**
> -	 * @pages_lock: Protects the page table and use count
> -	 */
> -	struct mutex pages_lock;
> -
>   	/**
>   	 * @pages: Page table
>   	 */
> @@ -79,11 +74,6 @@ struct drm_gem_shmem_object {
>   	 */
>   	struct sg_table *sgt;
>   
> -	/**
> -	 * @vmap_lock: Protects the vmap address and use count
> -	 */
> -	struct mutex vmap_lock;
> -
>   	/**
>   	 * @vaddr: Kernel virtual address of the backing memory
>   	 */

-- 
Thomas Zimmermann
Graphics Driver Developer
SUSE Software Solutions Germany GmbH
Maxfeldstr. 5, 90409 Nürnberg, Germany
(HRB 36809, AG Nürnberg)
Geschäftsführer: Ivo Totev
