Message-ID: <20240105184624.508603-19-dmitry.osipenko@collabora.com>
Date: Fri, 5 Jan 2024 21:46:12 +0300
From: Dmitry Osipenko <dmitry.osipenko@...labora.com>
To: David Airlie <airlied@...il.com>,
Gerd Hoffmann <kraxel@...hat.com>,
Gurchetan Singh <gurchetansingh@...omium.org>,
Chia-I Wu <olvaffe@...il.com>,
Daniel Vetter <daniel@...ll.ch>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Maxime Ripard <mripard@...nel.org>,
Thomas Zimmermann <tzimmermann@...e.de>,
Christian König <christian.koenig@....com>,
Qiang Yu <yuq825@...il.com>,
Steven Price <steven.price@....com>,
Boris Brezillon <boris.brezillon@...labora.com>,
Emma Anholt <emma@...olt.net>,
Melissa Wen <mwen@...lia.com>
Cc: dri-devel@...ts.freedesktop.org,
linux-kernel@...r.kernel.org,
kernel@...labora.com,
virtualization@...ts.linux-foundation.org
Subject: [PATCH v19 18/30] drm/panfrost: Explicitly get and put drm-shmem pages

To simplify drm-shmem refcount handling, we're moving away from the
implicit get_pages() performed by get_pages_sgt(). From now on, drivers
have to explicitly pin pages for as long as they use the sgt. Panfrost's
shrinker doesn't support swapping out BOs, hence pages stay pinned and
the sgt remains valid as long as the pages' use-count > 0.

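For illustration, the calling pattern drivers are expected to follow
after this change looks roughly like the sketch below. This is a
minimal, hypothetical caller (error paths trimmed, locking elided),
using the drm_gem_shmem_get_pages()/put_pages() and
drm_gem_shmem_get_pages_sgt() helpers reworked by this series:

  /* Sketch: pin pages explicitly for as long as the sgt is in use. */
  static int example_map_bo(struct drm_gem_shmem_object *shmem)
  {
          struct sg_table *sgt;
          int err;

          /* Take an explicit pages ref; pages stay pinned from here on. */
          err = drm_gem_shmem_get_pages(shmem);
          if (err)
                  return err;

          /* The sgt stays valid while our ref keeps pages_use_count > 0. */
          sgt = drm_gem_shmem_get_pages_sgt(shmem);
          if (IS_ERR(sgt)) {
                  drm_gem_shmem_put_pages(shmem);
                  return PTR_ERR(sgt);
          }

          /* ... map the BO on the GPU using the sgt ... */

          /* Done with the sgt, drop our pages ref. */
          drm_gem_shmem_put_pages(shmem);
          return 0;
  }
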
In Panfrost, panfrost_gem_mapping, the object representing a GPU mapping
of a BO, owns a pages ref. This guarantees that any BO mapped on the GPU
side has its pages retained until the mapping is destroyed.

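Concretely, the ownership pairs up like this (a condensed view; the real
code is in the hunks below):

  /* panfrost_gem_open(): the mapping takes the pages ref it owns
   * (non-heap BOs; heap BOs get theirs in the fault handler).
   */
  ret = drm_gem_shmem_get_pages(&bo->base);

  /* panfrost_gem_mapping_release(): the mapping drops its pages ref,
   * after which the pages are free to go away.
   */
  drm_gem_shmem_put_pages(&bo->base);
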
Since pages are no longer guaranteed to stay pinned for the BO's
lifetime, and the MADVISE(DONT_NEED) flag remains set after the GEM
handle has been destroyed, we need to add an extra 'is_purgeable' check
in panfrost_gem_purge() to make sure we're not trying to purge a BO that
has already had its pages released.

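To illustrate the window this check closes, consider an interleaving
along these lines (a hypothetical timeline, not taken from the code):

  /*
   *  release path                          shrinker
   *  ------------                          --------
   *  panfrost_gem_mapping_release()        panfrost_gem_purge()
   *    drm_gem_shmem_put_pages()             dma_resv_trylock() -> ok
   *      last ref: pages are released        drm_gem_shmem_is_purgeable()
   *                                            -> false, bail out instead
   *                                               of purging released pages
   */
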
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@...labora.com>
---
drivers/gpu/drm/panfrost/panfrost_gem.c | 63 ++++++++++++++-----
.../gpu/drm/panfrost/panfrost_gem_shrinker.c | 6 ++
2 files changed, 52 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index f268bd5c2884..7edfc12f7c1f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -35,20 +35,6 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
          */
         WARN_ON_ONCE(!list_empty(&bo->mappings.list));
 
-        if (bo->sgts) {
-                int i;
-                int n_sgt = bo->base.base.size / SZ_2M;
-
-                for (i = 0; i < n_sgt; i++) {
-                        if (bo->sgts[i].sgl) {
-                                dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
-                                                  DMA_BIDIRECTIONAL, 0);
-                                sg_free_table(&bo->sgts[i]);
-                        }
-                }
-                kvfree(bo->sgts);
-        }
-
         drm_gem_shmem_free(&bo->base);
 }
@@ -85,11 +71,40 @@ panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
 
 static void panfrost_gem_mapping_release(struct kref *kref)
 {
-        struct panfrost_gem_mapping *mapping;
-
-        mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
+        struct panfrost_gem_mapping *mapping =
+                container_of(kref, struct panfrost_gem_mapping, refcount);
+        struct panfrost_gem_object *bo = mapping->obj;
+        struct panfrost_device *pfdev = bo->base.base.dev->dev_private;
 
         panfrost_gem_teardown_mapping(mapping);
+
+        /* On heap BOs, release the sgts created in the fault handler path. */
+        if (bo->sgts) {
+                int i, n_sgt = bo->base.base.size / SZ_2M;
+
+                for (i = 0; i < n_sgt; i++) {
+                        if (bo->sgts[i].sgl) {
+                                dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
+                                                  DMA_BIDIRECTIONAL, 0);
+                                sg_free_table(&bo->sgts[i]);
+                        }
+                }
+                kvfree(bo->sgts);
+        }
+
+        /* Pages ref is owned by the panfrost_gem_mapping object. We must
+         * release our pages ref (if any), before releasing the object
+         * ref.
+         * Non-heap BOs acquired the pages at panfrost_gem_mapping creation
+         * time, and heap BOs may have acquired pages if the fault handler
+         * was called, in which case bo->sgts should be non-NULL.
+         */
+        if (!bo->base.base.import_attach && (!bo->is_heap || bo->sgts) &&
+            bo->base.madv >= 0) {
+                drm_gem_shmem_put_pages(&bo->base);
+                bo->sgts = NULL;
+        }
+
         drm_gem_object_put(&mapping->obj->base.base);
         panfrost_mmu_ctx_put(mapping->mmu);
         kfree(mapping);
@@ -125,6 +140,20 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
         if (!mapping)
                 return -ENOMEM;
 
+        if (!bo->is_heap && !bo->base.base.import_attach) {
+                /* Pages ref is owned by the panfrost_gem_mapping object.
+                 * For non-heap BOs, we request pages at mapping creation
+                 * time, such that the panfrost_mmu_map() call, further down in
+                 * this function, is guaranteed to have pages_use_count > 0
+                 * when drm_gem_shmem_get_pages_sgt() is called.
+                 */
+                ret = drm_gem_shmem_get_pages(&bo->base);
+                if (ret) {
+                        kfree(mapping);
+                        return ret;
+                }
+        }
+
         INIT_LIST_HEAD(&mapping->node);
         kref_init(&mapping->refcount);
         drm_gem_object_get(obj);

diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 02b60ea1433a..d4fb0854cf2f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -50,6 +50,12 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
         if (!dma_resv_trylock(shmem->base.resv))
                 goto unlock_mappings;
 
+        /* BO might have become unpurgeable if the last pages_use_count ref
+         * was dropped, but the BO hasn't been destroyed yet.
+         */
+        if (!drm_gem_shmem_is_purgeable(shmem))
+                goto unlock_mappings;
+
         panfrost_gem_teardown_mappings_locked(bo);
         drm_gem_shmem_purge_locked(&bo->base);
         ret = true;

--
2.43.0