Message-ID: <20180917030006.245495-96-alexander.levin@microsoft.com>
Date: Mon, 17 Sep 2018 03:01:12 +0000
From: Sasha Levin <Alexander.Levin@...rosoft.com>
To: "stable@...r.kernel.org" <stable@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
CC: Daniel Vetter <daniel.vetter@...ll.ch>,
Daniel Vetter <daniel.vetter@...el.com>,
Laurent Pinchart <laurent.pinchart@...asonboard.com>,
Tomi Valkeinen <tomi.valkeinen@...com>,
Sasha Levin <Alexander.Levin@...rosoft.com>
Subject: [PATCH AUTOSEL 4.18 096/136] drm/omap: gem: Fix mm_list locking
From: Daniel Vetter <daniel.vetter@...ll.ch>
[ Upstream commit 5117bd898e8c0a31e8ab3a9b8523aecf0706e997 ]
- None of the list walks were protected.
- Switch to a mutex, since the list walk at device resume time can
  sleep when pinning buffers through the tiler.
The only thing we need to be careful about here is that while we walk
the list we can't unreference any GEM objects, since the final unref
would result in a recursive deadlock. But the only places that walk the
list are device resume and debugfs dumping, so this is safe.
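
For context, a minimal standalone sketch (not part of the patch) of the
pattern this change adopts: the list lock becomes a mutex so the
resume-path walk may sleep while repinning. It assumes a kernel-module
build; all names other than the kernel mutex/list helpers (my_private,
my_object, my_repin, my_resume, ...) are hypothetical and only
illustrate the locking scheme.

/*
 * Sketch of the locking pattern: a mutex protects a list that is
 * walked while doing work that can sleep. Names are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct my_private {
	struct mutex list_lock;		/* protects obj_list; holders may sleep */
	struct list_head obj_list;
};

struct my_object {
	struct list_head mm_list;	/* link into my_private::obj_list */
};

/* Hypothetical stand-in for the tiler repin done at resume time. */
static int my_repin(struct my_object *obj)
{
	might_sleep();			/* pinning can block, which rules out a spinlock */
	return 0;
}

static void my_init(struct my_private *priv)
{
	mutex_init(&priv->list_lock);	/* was spin_lock_init() before the change */
	INIT_LIST_HEAD(&priv->obj_list);
}

static void my_add(struct my_private *priv, struct my_object *obj)
{
	mutex_lock(&priv->list_lock);
	list_add(&obj->mm_list, &priv->obj_list);
	mutex_unlock(&priv->list_lock);
}

/*
 * Resume-style walk: the whole walk runs under the mutex, and no
 * object is unreferenced while it is held (see the caveat above).
 */
static int my_resume(struct my_private *priv)
{
	struct my_object *obj;
	int ret = 0;

	mutex_lock(&priv->list_lock);
	list_for_each_entry(obj, &priv->obj_list, mm_list) {
		ret = my_repin(obj);	/* may sleep under the mutex */
		if (ret)
			break;
	}
	mutex_unlock(&priv->list_lock);
	return ret;
}
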
Signed-off-by: Daniel Vetter <daniel.vetter@...el.com>
Reviewed-by: Tomi Valkeinen <tomi.valkeinen@...com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@...asonboard.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart@...asonboard.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@...com>
Signed-off-by: Sasha Levin <alexander.levin@...rosoft.com>
---
drivers/gpu/drm/omapdrm/omap_debugfs.c | 2 ++
drivers/gpu/drm/omapdrm/omap_drv.c | 2 +-
drivers/gpu/drm/omapdrm/omap_drv.h | 2 +-
drivers/gpu/drm/omapdrm/omap_gem.c | 15 +++++++++------
4 files changed, 13 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index b42e286616b0..84da7a5b84f3 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -37,7 +37,9 @@ static int gem_show(struct seq_file *m, void *arg)
 		return ret;
 
 	seq_printf(m, "All Objects:\n");
+	mutex_lock(&priv->list_lock);
 	omap_gem_describe_objects(&priv->obj_list, m);
+	mutex_unlock(&priv->list_lock);
 
 	mutex_unlock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index ef3b0e3571ec..5fcf9eaf3eaf 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -540,7 +540,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
 	priv->omaprev = soc ? (unsigned int)soc->data : 0;
 	priv->wq = alloc_ordered_workqueue("omapdrm", 0);
 
-	spin_lock_init(&priv->list_lock);
+	mutex_init(&priv->list_lock);
 	INIT_LIST_HEAD(&priv->obj_list);
 
 	/* Allocate and initialize the DRM device. */
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 6eaee4df4559..f27c8e216adf 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -71,7 +71,7 @@ struct omap_drm_private {
 	struct workqueue_struct *wq;
 
 	/* lock for obj_list below */
-	spinlock_t list_lock;
+	struct mutex list_lock;
 
 	/* list of GEM objects: */
 	struct list_head obj_list;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 17a53d207978..7a029b892a37 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1001,6 +1001,7 @@ int omap_gem_resume(struct drm_device *dev)
 	struct omap_gem_object *omap_obj;
 	int ret = 0;
 
+	mutex_lock(&priv->list_lock);
 	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
 		if (omap_obj->block) {
 			struct drm_gem_object *obj = &omap_obj->base;
@@ -1012,12 +1013,14 @@ int omap_gem_resume(struct drm_device *dev)
 					omap_obj->roll, true);
 			if (ret) {
 				dev_err(dev->dev, "could not repin: %d\n", ret);
-				return ret;
+				goto done;
 			}
 		}
 	}
 
-	return 0;
+done:
+	mutex_unlock(&priv->list_lock);
+	return ret;
 }
 #endif
 
@@ -1085,9 +1088,9 @@ void omap_gem_free_object(struct drm_gem_object *obj)
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	spin_lock(&priv->list_lock);
+	mutex_lock(&priv->list_lock);
 	list_del(&omap_obj->mm_list);
-	spin_unlock(&priv->list_lock);
+	mutex_unlock(&priv->list_lock);
 
 	/* this means the object is still pinned.. which really should
 	 * not happen. I think..
@@ -1206,9 +1209,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 			goto err_release;
 	}
 
-	spin_lock(&priv->list_lock);
+	mutex_lock(&priv->list_lock);
 	list_add(&omap_obj->mm_list, &priv->obj_list);
-	spin_unlock(&priv->list_lock);
+	mutex_unlock(&priv->list_lock);
 
 	return obj;
 
--
2.17.1