Message-ID: <20250322212608.40511-11-dmitry.osipenko@collabora.com>
Date: Sun, 23 Mar 2025 00:26:08 +0300
From: Dmitry Osipenko <dmitry.osipenko@...labora.com>
To: David Airlie <airlied@...il.com>,
Simona Vetter <simona@...ll.ch>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Maxime Ripard <mripard@...nel.org>,
Thomas Zimmermann <tzimmermann@...e.de>,
Christian König <christian.koenig@....com>,
Gerd Hoffmann <kraxel@...hat.com>,
Qiang Yu <yuq825@...il.com>,
Steven Price <steven.price@....com>,
Boris Brezillon <boris.brezillon@...labora.com>,
Frank Binns <frank.binns@...tec.com>,
Matt Coster <matt.coster@...tec.com>
Cc: dri-devel@...ts.freedesktop.org,
linux-kernel@...r.kernel.org,
kernel@...labora.com
Subject: [PATCH v20 10/10] drm/shmem-helper: Use refcount_t for vmap_use_count

Use the refcount_t helper for vmap_use_count to make the refcounting
consistent with pages_use_count and pages_pin_count, which already use
refcount_t. This also lets vmapping benefit from refcount_t's overflow
checks.
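
For illustration only (not part of this patch), here is a minimal sketch
of the refcounting pattern the conversion relies on. The struct and the
demo_obj_* helpers are made-up names; refcount_inc_not_zero(),
refcount_set() and refcount_dec_and_test() are the kernel refcount_t
primitives the shmem helper switches to:

#include <linux/refcount.h>

struct demo_obj {
        refcount_t use_count;   /* 0 == unmapped, >0 == mapped */
        void *vaddr;
};

/* Fast path: reuse an existing mapping, if any. Returns true on reuse. */
static bool demo_obj_get_existing(struct demo_obj *obj)
{
        /* Only bumps the count when it is already non-zero. */
        return refcount_inc_not_zero(&obj->use_count);
}

/* Slow path: record the first mapping once it has been set up. */
static void demo_obj_set_first(struct demo_obj *obj, void *vaddr)
{
        obj->vaddr = vaddr;
        refcount_set(&obj->use_count, 1);
}

/* Returns true on the final put; the caller then tears the mapping down. */
static bool demo_obj_put(struct demo_obj *obj)
{
        return refcount_dec_and_test(&obj->use_count);
}

Because the first reference is only taken with refcount_set(..., 1) after
the mapping has been created successfully, there is no error path that has
to reset the count back to zero, which is why the err_zero_use label can
be dropped below.
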
Acked-by: Maxime Ripard <mripard@...nel.org>
Reviewed-by: Boris Brezillon <boris.brezillon@...labora.com>
Suggested-by: Boris Brezillon <boris.brezillon@...labora.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@...labora.com>
---
drivers/gpu/drm/drm_gem_shmem_helper.c | 28 ++++++++++------------
drivers/gpu/drm/tests/drm_gem_shmem_test.c | 6 ++---
include/drm/drm_gem_shmem_helper.h | 2 +-
3 files changed, 16 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 84a196bbe44f..2d924d547a51 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -165,7 +165,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
} else {
dma_resv_lock(shmem->base.resv, NULL);
- drm_WARN_ON(obj->dev, shmem->vmap_use_count);
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
if (shmem->sgt) {
dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
@@ -355,23 +355,25 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
dma_resv_assert_held(shmem->base.resv);
- if (shmem->vmap_use_count++ > 0) {
+ if (refcount_inc_not_zero(&shmem->vmap_use_count)) {
iosys_map_set_vaddr(map, shmem->vaddr);
return 0;
}
ret = drm_gem_shmem_pin_locked(shmem);
if (ret)
- goto err_zero_use;
+ return ret;
if (shmem->map_wc)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, prot);
- if (!shmem->vaddr)
+ if (!shmem->vaddr) {
ret = -ENOMEM;
- else
+ } else {
iosys_map_set_vaddr(map, shmem->vaddr);
+ refcount_set(&shmem->vmap_use_count, 1);
+ }
}
if (ret) {
@@ -384,8 +386,6 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
err_put_pages:
if (!drm_gem_is_imported(obj))
drm_gem_shmem_unpin_locked(shmem);
-err_zero_use:
- shmem->vmap_use_count = 0;
return ret;
}
@@ -413,14 +413,10 @@ void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
} else {
dma_resv_assert_held(shmem->base.resv);
- if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
- return;
-
- if (--shmem->vmap_use_count > 0)
- return;
-
- vunmap(shmem->vaddr);
- drm_gem_shmem_unpin_locked(shmem);
+ if (refcount_dec_and_test(&shmem->vmap_use_count)) {
+ vunmap(shmem->vaddr);
+ drm_gem_shmem_unpin_locked(shmem);
+ }
}
shmem->vaddr = NULL;
@@ -672,7 +668,7 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count));
drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count));
- drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+ drm_printf_indent(p, indent, "vmap_use_count=%u\n", refcount_read(&shmem->vmap_use_count));
drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info);
diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
index 1459cdb0c413..81cadaecdd4f 100644
--- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c
+++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
@@ -168,7 +168,7 @@ static void drm_gem_shmem_test_vmap(struct kunit *test)
shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
KUNIT_EXPECT_NULL(test, shmem->vaddr);
- KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);
ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -177,7 +177,7 @@ static void drm_gem_shmem_test_vmap(struct kunit *test)
KUNIT_ASSERT_EQ(test, ret, 0);
KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
- KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 1);
iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
for (i = 0; i < TEST_SIZE; i++)
@@ -185,7 +185,7 @@ static void drm_gem_shmem_test_vmap(struct kunit *test)
drm_gem_shmem_vunmap_locked(shmem, &map);
KUNIT_EXPECT_NULL(test, shmem->vaddr);
- KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+ KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);
}
/*
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 8b9bba87ae63..b4f993da3cae 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -82,7 +82,7 @@ struct drm_gem_shmem_object {
* Reference count on the virtual address.
* The address are un-mapped when the count reaches zero.
*/
- unsigned int vmap_use_count;
+ refcount_t vmap_use_count;
/**
* @pages_mark_dirty_on_put:
--
2.49.0