Message-ID: <20230905090516.174c3524@collabora.com>
Date: Tue, 5 Sep 2023 09:05:16 +0200
From: Boris Brezillon <boris.brezillon@...labora.com>
To: Dmitry Osipenko <dmitry.osipenko@...labora.com>
Cc: David Airlie <airlied@...il.com>,
Gerd Hoffmann <kraxel@...hat.com>,
Gurchetan Singh <gurchetansingh@...omium.org>,
Chia-I Wu <olvaffe@...il.com>, Daniel Vetter <daniel@...ll.ch>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Maxime Ripard <mripard@...nel.org>,
Thomas Zimmermann <tzimmermann@...e.de>,
Christian König <christian.koenig@....com>,
Qiang Yu <yuq825@...il.com>,
Steven Price <steven.price@....com>,
Emma Anholt <emma@...olt.net>, Melissa Wen <mwen@...lia.com>,
dri-devel@...ts.freedesktop.org, linux-kernel@...r.kernel.org,
kernel@...labora.com, virtualization@...ts.linux-foundation.org
Subject: Re: [PATCH v16 14/20] drm/shmem-helper: Use refcount_t for
vmap_use_count
On Sun, 3 Sep 2023 20:07:30 +0300
Dmitry Osipenko <dmitry.osipenko@...labora.com> wrote:
> Use refcount_t helper for vmap_use_count to make refcounting consistent
> with pages_use_count and pages_pin_count that use refcount_t. This will
> allow optimizing unlocked vmappings by skipping reservation locking if
> refcnt > 1
nit: this optimization doesn't exist in practice: the resv lock is
taken by the core, and ->v[un]map() are always called with that lock
already held, so there is no lock-free fast path to take.
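
Just to make this concrete, callers go through the core helpers, which do
roughly the following (paraphrased from memory, see drm_gem_vmap_unlocked()
in drm_gem.c):

int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	/* The core grabs the resv lock here... */
	dma_resv_lock(obj->resv, NULL);
	/* ...and ->vmap() (drm_gem_shmem_vmap_locked() in our case) runs under it. */
	ret = drm_gem_vmap(obj, map);
	dma_resv_unlock(obj->resv);

	return ret;
}

So by the time we reach the refcount_inc_not_zero() fast path, the lock has
already been taken anyway.
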
> and also makes vmapping benefit from refcount_t's overflow checks.
>
> Suggested-by: Boris Brezillon <boris.brezillon@...labora.com>
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@...labora.com>
I agree with your two other points (consistency with the other refcounting
primitives and the safety provided by refcount_t), so
Reviewed-by: Boris Brezillon <boris.brezillon@...labora.com>
but I'd recommend rephrasing or dropping the part of the commit message that
mentions the lock-free optimization.
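
To spell out the second point: the win is that the refcount_t helpers
saturate and WARN on overflow/underflow instead of silently wrapping like the
open-coded 'unsigned int' counter could. The pattern the helper ends up with
boils down to something like this (purely illustrative sketch, names made up,
not part of the patch):

/* Needs <linux/refcount.h>; only safe because the resv lock serializes it. */

/* Returns true when this is the first user and the mapping must be created. */
static bool vmap_ref_get(refcount_t *cnt)
{
	/* Fast path: mapping already exists, just take another reference. */
	if (refcount_inc_not_zero(cnt))
		return false;

	/*
	 * First user: the caller creates the mapping, then this publishes
	 * the initial reference. refcount_t saturates and WARNs if it ever
	 * overflows or underflows, instead of wrapping like a plain counter.
	 */
	refcount_set(cnt, 1);
	return true;
}

static bool vmap_ref_put(refcount_t *cnt)
{
	/* True when the last reference is dropped -> time to vunmap(). */
	return refcount_dec_and_test(cnt);
}
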
> ---
>  drivers/gpu/drm/drm_gem_shmem_helper.c | 28 +++++++++++---------------
>  include/drm/drm_gem_shmem_helper.h     |  2 +-
>  2 files changed, 13 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index 899f655a65bb..4633a418faba 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -144,7 +144,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
>  	} else if (!shmem->imported_sgt) {
>  		dma_resv_lock(shmem->base.resv, NULL);
> 
> -		drm_WARN_ON(obj->dev, shmem->vmap_use_count);
> +		drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
> 
>  		if (shmem->sgt) {
>  			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
> @@ -345,23 +345,25 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
>
>  		dma_resv_assert_held(shmem->base.resv);
> 
> -		if (shmem->vmap_use_count++ > 0) {
> +		if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
>  			iosys_map_set_vaddr(map, shmem->vaddr);
>  			return 0;
>  		}
> 
>  		ret = drm_gem_shmem_pin_locked(shmem);
>  		if (ret)
> -			goto err_zero_use;
> +			return ret;
> 
>  		if (shmem->map_wc)
>  			prot = pgprot_writecombine(prot);
>  		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
>  				    VM_MAP, prot);
> -		if (!shmem->vaddr)
> +		if (!shmem->vaddr) {
>  			ret = -ENOMEM;
> -		else
> +		} else {
>  			iosys_map_set_vaddr(map, shmem->vaddr);
> +			refcount_set(&shmem->vmap_use_count, 1);
> +		}
>  	}
> 
>  	if (ret) {
> @@ -374,8 +376,6 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
>  err_put_pages:
>  	if (!obj->import_attach)
>  		drm_gem_shmem_unpin_locked(shmem);
> -err_zero_use:
> -	shmem->vmap_use_count = 0;
> 
>  	return ret;
>  }
> @@ -403,14 +403,10 @@ void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
>  	} else {
>  		dma_resv_assert_held(shmem->base.resv);
> 
> -		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
> -			return;
> -
> -		if (--shmem->vmap_use_count > 0)
> -			return;
> -
> -		vunmap(shmem->vaddr);
> -		drm_gem_shmem_unpin_locked(shmem);
> +		if (refcount_dec_and_test(&shmem->vmap_use_count)) {
> +			vunmap(shmem->vaddr);
> +			drm_gem_shmem_unpin_locked(shmem);
> +		}
>  	}
> 
>  	shmem->vaddr = NULL;
> @@ -656,7 +652,7 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
>
>  	drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
>  	drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
> -	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
> +	drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
>  	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);
> diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
> index 396958a98c34..63e91e8f2d5c 100644
> --- a/include/drm/drm_gem_shmem_helper.h
> +++ b/include/drm/drm_gem_shmem_helper.h
> @@ -81,7 +81,7 @@ struct drm_gem_shmem_object {
>  	 * Reference count on the virtual address.
>  	 * The address are un-mapped when the count reaches zero.
>  	 */
> -	unsigned int vmap_use_count;
> +	refcount_t vmap_use_count;
> 
>  	/**
>  	 * @got_pages_sgt: