Message-ID: <20230915090611.5c4f1ef7@collabora.com>
Date: Fri, 15 Sep 2023 09:06:11 +0200
From: Boris Brezillon <boris.brezillon@...labora.com>
To: Dmitry Osipenko <dmitry.osipenko@...labora.com>
Cc: David Airlie <airlied@...il.com>,
Gerd Hoffmann <kraxel@...hat.com>,
Gurchetan Singh <gurchetansingh@...omium.org>,
Chia-I Wu <olvaffe@...il.com>, Daniel Vetter <daniel@...ll.ch>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Maxime Ripard <mripard@...nel.org>,
Thomas Zimmermann <tzimmermann@...e.de>,
Christian König <christian.koenig@....com>,
Qiang Yu <yuq825@...il.com>,
Steven Price <steven.price@....com>,
Emma Anholt <emma@...olt.net>, Melissa Wen <mwen@...lia.com>,
dri-devel@...ts.freedesktop.org, linux-kernel@...r.kernel.org,
kernel@...labora.com, virtualization@...ts.linux-foundation.org
Subject: Re: [PATCH v17 07/18] drm/shmem-helper: Use refcount_t for
pages_use_count
On Fri, 15 Sep 2023 02:27:10 +0300
Dmitry Osipenko <dmitry.osipenko@...labora.com> wrote:
> Use the atomic refcount_t helper for pages_use_count to optimize the
> pin/unpin functions by skipping reservation locking while the GEM pin
> refcount is greater than 1.
>
> Suggested-by: Boris Brezillon <boris.brezillon@...labora.com>
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@...labora.com>
Reviewed-by: Boris Brezillon <boris.brezillon@...labora.com>
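
For anyone following along, the point of the conversion is that a pin
fast path can take a pages reference without the dma_resv lock whenever
the count is already non-zero, because refcount_inc_not_zero() only
succeeds on a non-zero count. A rough userspace model of that pattern,
using C11 atomics to stand in for refcount_t (struct obj and
take_pages_ref() are made-up names for illustration, not part of the
helper API):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct obj {
		atomic_uint pages_use_count;	/* models refcount_t */
		void **pages;
	};

	/* Models refcount_inc_not_zero(): increment iff currently != 0. */
	static bool take_pages_ref(struct obj *o)
	{
		unsigned int old = atomic_load(&o->pages_use_count);

		do {
			if (old == 0)
				return false;	/* 0 -> 1 needs the resv lock */
		} while (!atomic_compare_exchange_weak(&o->pages_use_count,
						       &old, old + 1));

		return true;
	}
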
> ---
> drivers/gpu/drm/drm_gem_shmem_helper.c | 33 +++++++++++--------------
> drivers/gpu/drm/lima/lima_gem.c | 2 +-
> drivers/gpu/drm/panfrost/panfrost_mmu.c | 2 +-
> include/drm/drm_gem_shmem_helper.h | 2 +-
> 4 files changed, 18 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index 286f0ca51309..e9d9fbd52b34 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -155,7 +155,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
> if (shmem->pages)
> drm_gem_shmem_put_pages_locked(shmem);
>
> - drm_WARN_ON(obj->dev, shmem->pages_use_count);
> + drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
>
> dma_resv_unlock(shmem->base.resv);
> }
> @@ -172,14 +172,13 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
>
> dma_resv_assert_held(shmem->base.resv);
>
> - if (shmem->pages_use_count++ > 0)
> + if (refcount_inc_not_zero(&shmem->pages_use_count))
> return 0;
>
> pages = drm_gem_get_pages(obj);
> if (IS_ERR(pages)) {
> drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
> PTR_ERR(pages));
> - shmem->pages_use_count = 0;
> return PTR_ERR(pages);
> }
>
> @@ -195,6 +194,8 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
>
> shmem->pages = pages;
>
> + refcount_set(&shmem->pages_use_count, 1);
> +
> return 0;
> }
>
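One subtlety worth spelling out: dropping the "shmem->pages_use_count =
0;" line from the error path is correct because the count is now only
moved off zero *after* drm_gem_get_pages() has succeeded, so on failure
there is nothing to undo. Continuing the userspace model from above
(alloc_backing_pages() is a placeholder I made up for drm_gem_get_pages(),
not a real function):

	/* Models drm_gem_shmem_get_pages_locked(), lock held by caller. */
	static int get_pages_locked(struct obj *o)
	{
		void **pages;

		if (take_pages_ref(o))	/* fast path: count already > 0 */
			return 0;

		pages = alloc_backing_pages(o);	/* placeholder for drm_gem_get_pages() */
		if (!pages)
			return -ENOMEM;	/* count never left 0, nothing to undo */

		o->pages = pages;
		/* Models refcount_set(&shmem->pages_use_count, 1) */
		atomic_store(&o->pages_use_count, 1);
		return 0;
	}
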
> @@ -210,21 +211,17 @@ void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
>
> dma_resv_assert_held(shmem->base.resv);
>
> - if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
> - return;
> -
> - if (--shmem->pages_use_count > 0)
> - return;
> -
> + if (refcount_dec_and_test(&shmem->pages_use_count)) {
> #ifdef CONFIG_X86
> - if (shmem->map_wc)
> - set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
> + if (shmem->map_wc)
> + set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
> #endif
>
> - drm_gem_put_pages(obj, shmem->pages,
> - shmem->pages_mark_dirty_on_put,
> - shmem->pages_mark_accessed_on_put);
> - shmem->pages = NULL;
> + drm_gem_put_pages(obj, shmem->pages,
> + shmem->pages_mark_dirty_on_put,
> + shmem->pages_mark_accessed_on_put);
> + shmem->pages = NULL;
> + }
> }
> EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);
>
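The put side collapses to a single dec-and-test, and the open-coded
"WARN_ON(!pages_use_count)" can go away because refcount_t already
warns on underflow. In the same sketch (free_backing_pages() again
standing in for drm_gem_put_pages()):

	/* Models drm_gem_shmem_put_pages_locked(), lock held by caller. */
	static void put_pages_locked(struct obj *o)
	{
		/* fetch_sub() returning 1 models refcount_dec_and_test() */
		if (atomic_fetch_sub(&o->pages_use_count, 1) == 1) {
			free_backing_pages(o->pages);	/* drm_gem_put_pages() */
			o->pages = NULL;
		}
	}
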
> @@ -551,8 +548,8 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
> * mmap'd, vm_open() just grabs an additional reference for the new
> * mm the vma is getting copied into (ie. on fork()).
> */
> - if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
> - shmem->pages_use_count++;
> + drm_WARN_ON_ONCE(obj->dev,
> + !refcount_inc_not_zero(&shmem->pages_use_count));
>
> dma_resv_unlock(shmem->base.resv);
>
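The vm_open() change reads nicely too: a vma being copied on fork()
already holds a pages reference, so the count can never legitimately be
zero here, and refcount_inc_not_zero() both takes the new reference and
flags that impossible case in one call. In the model above it would
boil down to something like (needs <stdio.h>; just a sketch, the kernel
warns via drm_WARN_ON_ONCE() rather than printing):

	/* Models drm_gem_shmem_vm_open(): the copied vma must own a ref. */
	static void vma_open(struct obj *o)
	{
		if (!take_pages_ref(o))	/* drm_WARN_ON_ONCE() in the kernel */
			fprintf(stderr, "vm_open without a pages ref\n");
	}
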
> @@ -640,7 +637,7 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
> return;
>
> drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
> - drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
> + drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
> drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
> drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
> }
> diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
> index 62d4a409faa8..988e74f67465 100644
> --- a/drivers/gpu/drm/lima/lima_gem.c
> +++ b/drivers/gpu/drm/lima/lima_gem.c
> @@ -47,7 +47,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
> }
>
> bo->base.pages = pages;
> - bo->base.pages_use_count = 1;
> + refcount_set(&bo->base.pages_use_count, 1);
>
> mapping_set_unevictable(mapping);
> }
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index 7771769f0ce0..a91252053aa3 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -487,7 +487,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
> goto err_unlock;
> }
> bo->base.pages = pages;
> - bo->base.pages_use_count = 1;
> + refcount_set(&bo->base.pages_use_count, 1);
> } else {
> pages = bo->base.pages;
> if (pages[page_offset]) {
> diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
> index 268b3127d150..3947f5c6bff7 100644
> --- a/include/drm/drm_gem_shmem_helper.h
> +++ b/include/drm/drm_gem_shmem_helper.h
> @@ -37,7 +37,7 @@ struct drm_gem_shmem_object {
> * Reference count on the pages table.
> * The pages are put when the count reaches zero.
> */
> - unsigned int pages_use_count;
> + refcount_t pages_use_count;
>
> /**
> * @pages_pin_count: