Message-Id: <20210401184548.607663-1-robdclark@gmail.com>
Date: Thu, 1 Apr 2021 11:45:48 -0700
From: Rob Clark <robdclark@...il.com>
To: dri-devel@...ts.freedesktop.org
Cc: Jordan Crouse <jordan@...micpenguin.net>,
Rob Clark <robdclark@...omium.org>,
Rob Clark <robdclark@...il.com>, Sean Paul <sean@...rly.run>,
David Airlie <airlied@...ux.ie>,
Daniel Vetter <daniel@...ll.ch>,
linux-arm-msm@...r.kernel.org (open list:DRM DRIVER FOR MSM ADRENO GPU),
freedreno@...ts.freedesktop.org (open list:DRM DRIVER FOR MSM ADRENO GPU),
linux-kernel@...r.kernel.org (open list)
Subject: [PATCH] drm/msm: Drop mm_lock in scan loop
From: Rob Clark <robdclark@...omium.org>

lock_stat + mmm_donut[1] say that this reduces contention on mm_lock
significantly (~350x lower waittime-max, and ~100x lower waittime-avg)

[1] https://chromium.googlesource.com/chromiumos/platform/microbenchmarks/+/refs/heads/main/mmm_donut.py

Signed-off-by: Rob Clark <robdclark@...omium.org>
---
drivers/gpu/drm/msm/msm_gem.c | 2 +-
drivers/gpu/drm/msm/msm_gem_shrinker.c | 48 ++++++++++++++++++++++----
2 files changed, 43 insertions(+), 7 deletions(-)
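For context, here is the shape of the new scan loop reduced to a
self-contained sketch. Every name in it (struct obj, obj_lru, lru_lock,
try_purge(), obj_put()) is a placeholder rather than the msm_gem API; the
point is only that the per-object work runs with the list lock dropped,
and that anything not purged gets spliced back at the end:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/shrinker.h>

/* Placeholder object, standing in for struct msm_gem_object. */
struct obj {
        struct kref refcount;
        struct list_head node;
};

static DEFINE_MUTEX(lru_lock);          /* stands in for priv->mm_lock */
static LIST_HEAD(obj_lru);              /* stands in for inactive_dontneed */

/*
 * Assumed helpers: try_purge() returns the number of pages freed and, like
 * msm_gem_purge()/update_inactive(), moves a purged object off whatever
 * list it is on; obj_put() drops the reference taken in the loop below.
 */
unsigned long try_purge(struct obj *o);
void obj_put(struct obj *o);

static unsigned long scan_sketch(struct shrink_control *sc)
{
        struct list_head still_in_list;
        unsigned long freed = 0;

        INIT_LIST_HEAD(&still_in_list);

        mutex_lock(&lru_lock);

        while (freed < sc->nr_to_scan) {
                struct obj *o = list_first_entry_or_null(&obj_lru,
                                typeof(*o), node);
                if (!o)
                        break;

                /* park it on a private list before deciding anything else */
                list_move_tail(&o->node, &still_in_list);

                /* already being freed?  leave it to the free path */
                if (!kref_get_unless_zero(&o->refcount))
                        continue;

                /* drop the list lock around the expensive part */
                mutex_unlock(&lru_lock);

                freed += try_purge(o);

                obj_put(o);
                mutex_lock(&lru_lock);
        }

        /* whatever was not purged goes back on the shrinker's list */
        list_splice_tail(&still_in_list, &obj_lru);

        mutex_unlock(&lru_lock);

        return freed;
}

The private still_in_list is what keeps the scanner's place without holding
the lock across the purge: the object being worked on is parked off the
shared list, and iteration simply restarts from the head once the lock is
retaken.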
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 2ecf7f1cef25..75cea5b801da 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -719,7 +719,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
put_iova_vmas(obj);
msm_obj->madv = __MSM_MADV_PURGED;
- mark_unpurgable(msm_obj);
+ update_inactive(msm_obj);
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
drm_gem_free_mmap_offset(obj);
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index f3e948af01c5..6bbb15d64861 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -22,26 +22,62 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
- struct msm_gem_object *msm_obj;
+ struct list_head still_in_list;
unsigned long freed = 0;
+ INIT_LIST_HEAD(&still_in_list);
+
mutex_lock(&priv->mm_lock);
- list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
- if (freed >= sc->nr_to_scan)
+ while (freed < sc->nr_to_scan) {
+ struct msm_gem_object *msm_obj = list_first_entry_or_null(
+ &priv->inactive_dontneed, typeof(*msm_obj), mm_list);
+
+ if (!msm_obj)
break;
- /* Use trylock, because we cannot block on a obj that
- * might be trying to acquire mm_lock
+
+ /*
+ * Move it onto the private list first; if it is in the process
+ * of being freed, msm_gem_free_object can be blocked on mm_lock
+ * waiting to remove it, so just skip it.
*/
+ list_move_tail(&msm_obj->mm_list, &still_in_list);
+
- if (!msm_gem_trylock(&msm_obj->base))
+ if (!kref_get_unless_zero(&msm_obj->base.refcount))
continue;
+
+ /*
+ * Now that we own a reference, we can drop mm_lock for the
+ * rest of the loop body, to reduce contention with the
+ * retire_submit path (which could make more objects available
+ * to purge)
+ */
+ mutex_unlock(&priv->mm_lock);
+
+ /*
+ * Note that this still needs to be trylock, since we can
+ * hit shrinker in response to trying to get backing pages
+ * for this obj (ie. while its lock is already held)
+ */
+ if (!msm_gem_trylock(&msm_obj->base))
+ goto tail;
+
if (is_purgeable(msm_obj)) {
+ /*
+ * This will move the obj out of still_in_list to
+ * the purged list
+ */
msm_gem_purge(&msm_obj->base);
freed += msm_obj->base.size >> PAGE_SHIFT;
}
msm_gem_unlock(&msm_obj->base);
+
+tail:
+ drm_gem_object_put(&msm_obj->base);
+ mutex_lock(&priv->mm_lock);
}
+ list_splice_tail(&still_in_list, &priv->inactive_dontneed);
mutex_unlock(&priv->mm_lock);
if (freed > 0) {
--
2.30.2
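Reusing the placeholders from the sketch above, this is the free-side
ordering that makes a failed kref_get_unless_zero() safe to treat as "just
skip it": the final put unlinks the object under the same lock before
tearing it down. (In msm the real free path is msm_gem_free_object(), which
is not shown in this patch.)

#include <linux/slab.h>

static void obj_release(struct kref *kref)
{
        struct obj *o = container_of(kref, struct obj, refcount);

        /*
         * May block here behind the scanner.  By the time we get the
         * lock, the scanner has either skipped this object (its ref-get
         * failed) or has already dropped the reference it took.
         */
        mutex_lock(&lru_lock);
        list_del(&o->node);
        mutex_unlock(&lru_lock);

        kfree(o);
}

void obj_put(struct obj *o)
{
        kref_put(&o->refcount, obj_release);
}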