Message-ID: <20231029230205.93277-19-dmitry.osipenko@collabora.com>
Date: Mon, 30 Oct 2023 02:01:57 +0300
From: Dmitry Osipenko <dmitry.osipenko@...labora.com>
To: David Airlie <airlied@...il.com>,
Gerd Hoffmann <kraxel@...hat.com>,
Gurchetan Singh <gurchetansingh@...omium.org>,
Chia-I Wu <olvaffe@...il.com>, Daniel Vetter <daniel@...ll.ch>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Maxime Ripard <mripard@...nel.org>,
Thomas Zimmermann <tzimmermann@...e.de>,
Christian König <christian.koenig@....com>,
Qiang Yu <yuq825@...il.com>,
Steven Price <steven.price@....com>,
Boris Brezillon <boris.brezillon@...labora.com>,
Emma Anholt <emma@...olt.net>, Melissa Wen <mwen@...lia.com>
Cc: dri-devel@...ts.freedesktop.org, linux-kernel@...r.kernel.org,
kernel@...labora.com, virtualization@...ts.linux-foundation.org
Subject: [PATCH v18 18/26] drm/shmem-helper: Change sgt allocation policy

In preparation for adding drm-shmem memory shrinker support, change
the SGT allocation policy as follows:

1. An SGT can be allocated only if the shmem pages are pinned at the
   time of allocation, otherwise the allocation fails.

2. Drivers must ensure that the pages stay pinned for as long as the
   SGT is in use, and must get a new SGT if the pages were unpinned.

This new policy is required by the shrinker because the shrinker moves
unpinned pages to/from swap, invalidating the SGT pointer once the
pages are relocated. A sketch of the expected driver usage is shown
below.

Previous patches prepared drivers for the new policy.
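
As an illustration only (hypothetical driver function, not part of this
patch), here is a minimal sketch of the expected usage built on the
existing drm_gem_shmem_pin()/drm_gem_shmem_unpin() and
drm_gem_shmem_get_pages_sgt() helpers:

	#include <drm/drm_gem_shmem_helper.h>

	/* Hypothetical driver helper following the new SGT policy. */
	static int driver_bo_map(struct drm_gem_shmem_object *shmem,
				 struct sg_table **sgt)
	{
		int ret;

		/* Rule 1: the pages must be pinned before taking the SGT. */
		ret = drm_gem_shmem_pin(shmem);
		if (ret)
			return ret;

		*sgt = drm_gem_shmem_get_pages_sgt(shmem);
		if (IS_ERR(*sgt)) {
			drm_gem_shmem_unpin(shmem);
			return PTR_ERR(*sgt);
		}

		/*
		 * Rule 2: the SGT stays valid only while the pin is held;
		 * after drm_gem_shmem_unpin() a new SGT must be obtained.
		 */
		return 0;
	}
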
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@...labora.com>
---
drivers/gpu/drm/drm_gem_shmem_helper.c | 51 +++++++++++++-------------
1 file changed, 26 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index f371ebc6f85c..1420d2166b76 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -133,6 +133,14 @@ drm_gem_shmem_free_pages(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
+ if (shmem->sgt) {
+ dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
+ DMA_BIDIRECTIONAL, 0);
+ sg_free_table(shmem->sgt);
+ kfree(shmem->sgt);
+ shmem->sgt = NULL;
+ }
+
#ifdef CONFIG_X86
if (shmem->map_wc)
set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
@@ -155,23 +163,12 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- if (obj->import_attach) {
+ if (obj->import_attach)
drm_prime_gem_destroy(obj, shmem->sgt);
- } else {
- drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
-
- if (shmem->sgt) {
- dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
- DMA_BIDIRECTIONAL, 0);
- sg_free_table(shmem->sgt);
- kfree(shmem->sgt);
- }
- if (shmem->pages)
- drm_gem_shmem_put_pages_locked(shmem);
- drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
- drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));
- }
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count));
+ drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count));
drm_gem_object_release(obj);
kfree(shmem);
@@ -705,6 +702,9 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
drm_WARN_ON(obj->dev, obj->import_attach);
+ if (drm_WARN_ON(obj->dev, !shmem->pages))
+ return ERR_PTR(-ENOMEM);
+
return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
@@ -720,15 +720,10 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
drm_WARN_ON(obj->dev, obj->import_attach);
- ret = drm_gem_shmem_get_pages_locked(shmem);
- if (ret)
- return ERR_PTR(ret);
-
sgt = drm_gem_shmem_get_sg_table(shmem);
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- goto err_put_pages;
- }
+ if (IS_ERR(sgt))
+ return sgt;
+
/* Map the pages for use by the h/w. */
ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
if (ret)
@@ -741,8 +736,6 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
err_free_sgt:
sg_free_table(sgt);
kfree(sgt);
-err_put_pages:
- drm_gem_shmem_put_pages_locked(shmem);
return ERR_PTR(ret);
}
@@ -759,6 +752,14 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
* and difference between dma-buf imported and natively allocated objects.
* drm_gem_shmem_get_sg_table() should not be directly called by drivers.
*
+ * Drivers should adhere to these SGT usage rules:
+ *
+ * 1. SGT should be allocated only if shmem pages are pinned at the
+ *    time of allocation, otherwise allocation will fail.
+ *
+ * 2. Drivers should ensure that pages are pinned during the time of
+ *    SGT usage and should get new SGT if pages were unpinned.
+ *
* Returns:
* A pointer to the scatter/gather table of pinned pages or errno on failure.
*/
--
2.41.0
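
As a further sketch of rule 2 from the kernel-doc above (hypothetical
driver code, all driver-side names invented): once the pages have been
unpinned, a previously cached SGT must be treated as stale and
re-obtained under a new pin.

	#include <drm/drm_gem_shmem_helper.h>

	/* Hypothetical driver BO embedding the shmem GEM object. */
	struct driver_bo {
		struct drm_gem_shmem_object base;
		struct sg_table *sgt;	/* valid only while pinned */
	};

	static int driver_bo_repin(struct driver_bo *bo)
	{
		int ret;

		ret = drm_gem_shmem_pin(&bo->base);
		if (ret)
			return ret;

		/*
		 * The shrinker may have relocated the pages while they were
		 * unpinned, so the previously cached bo->sgt is stale and a
		 * fresh SGT has to be requested.
		 */
		bo->sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
		if (IS_ERR(bo->sgt)) {
			ret = PTR_ERR(bo->sgt);
			bo->sgt = NULL;
			drm_gem_shmem_unpin(&bo->base);
			return ret;
		}

		return 0;
	}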