Message-Id: <20190308161716.2466-2-eric@anholt.net>
Date: Fri, 8 Mar 2019 08:17:13 -0800
From: Eric Anholt <eric@...olt.net>
To: dri-devel@...ts.freedesktop.org
Cc: linux-kernel@...r.kernel.org, Rob Herring <robh@...nel.org>,
david.emett@...adcom.com, thomas.spurden@...adcom.com,
Eric Anholt <eric@...olt.net>
Subject: [PATCH 1/4] drm: Add helpers for locking an array of BO reservations.

Now that we have the reservation object in the GEM object, it's easy
to provide a helper for this common case. Noticed while reviewing
panfrost and lima drivers. This particular version came out of v3d,
which in turn was a copy from vc4.

Signed-off-by: Eric Anholt <eric@...olt.net>
---
drivers/gpu/drm/drm_gem.c | 76 +++++++++++++++++++++++++++++++++++++++
include/drm/drm_gem.h     |  4 +++
2 files changed, 80 insertions(+)
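
Not part of the patch, just a sketch for reviewers of how a driver's
submit path would pair the two helpers. struct my_job, my_driver_submit()
and their fields are made up for illustration and don't come from any
real driver:

#include <drm/drm_gem.h>
#include <linux/ww_mutex.h>

struct my_job {
	struct drm_gem_object **bos;
	int bo_count;
};

static int my_driver_submit(struct my_job *job)
{
	struct ww_acquire_ctx acquire_ctx;
	int ret;

	ret = drm_gem_lock_reservations(job->bos, job->bo_count,
					&acquire_ctx);
	if (ret)
		return ret;

	/* ... reserve fence slots and queue the job while locked ... */

	drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);
	return 0;
}

The point of the helpers is that the ww_mutex backoff/retry dance stays
in one place and each driver only sees a single lock/unlock pair around
its submission critical section.
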
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ad124f5a6f4d..9fd804f7d7ca 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1233,3 +1233,79 @@ void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
 		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
 }
 EXPORT_SYMBOL(drm_gem_vunmap);
+
+/**
+ * drm_gem_lock_reservations - Sets up the ww context and acquires
+ * the lock on an array of GEM objects.
+ *
+ * Once you've locked your reservations, you'll want to set up space
+ * for your shared fences (if applicable), submit your job, then
+ * drm_gem_unlock_reservations().
+ *
+ * @objs: drm_gem_objects to lock
+ * @count: Number of objects in @objs
+ * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
+ * part of tracking this set of locked reservations.
+ */
+int
+drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
+			  struct ww_acquire_ctx *acquire_ctx)
+{
+	int contended = -1;
+	int i, ret;
+
+	ww_acquire_init(acquire_ctx, &reservation_ww_class);
+
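+	/*
+	 * After a deadlock backoff all locks have been dropped: take the
+	 * BO we contended on first, using the slow path that waits for
+	 * its lock to become free, then try to pick up the rest again.
+	 */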
+retry:
+	if (contended != -1) {
+		struct drm_gem_object *obj = objs[contended];
+
+		ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
+						       acquire_ctx);
+		if (ret) {
+			ww_acquire_done(acquire_ctx);
+			return ret;
+		}
+	}
+
+	for (i = 0; i < count; i++) {
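+		/* The contended BO was locked in the slow path above. */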
+		if (i == contended)
+			continue;
+
+		ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock,
+						  acquire_ctx);
+		if (ret) {
+			int j;
+
+			for (j = 0; j < i; j++)
+				ww_mutex_unlock(&objs[j]->resv->lock);
+
+			if (contended != -1 && contended >= i)
+				ww_mutex_unlock(&objs[contended]->resv->lock);
+
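+			/*
+			 * -EDEADLK means we have to back off: remember
+			 * which BO we contended on so the retry above
+			 * takes its lock first.
+			 */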
+			if (ret == -EDEADLK) {
+				contended = i;
+				goto retry;
+			}
+
+			ww_acquire_done(acquire_ctx);
+			return ret;
+		}
+	}
+
+	ww_acquire_done(acquire_ctx);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_lock_reservations);
+
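+/**
+ * drm_gem_unlock_reservations - Drops the locks taken by
+ * drm_gem_lock_reservations() and releases the ww acquire context.
+ *
+ * @objs: drm_gem_objects to unlock
+ * @count: Number of objects in @objs
+ * @acquire_ctx: the struct ww_acquire_ctx passed to
+ * drm_gem_lock_reservations().
+ */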
+void
+drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
+ struct ww_acquire_ctx *acquire_ctx)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ ww_mutex_unlock(&objs[i]->resv->lock);
+
+ ww_acquire_fini(acquire_ctx);
+}
+EXPORT_SYMBOL(drm_gem_unlock_reservations);
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 25f1ff2df464..2955aaab3dca 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -384,6 +384,10 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
 long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
 				     bool wait_all, unsigned long timeout);
+int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
+			      struct ww_acquire_ctx *acquire_ctx);
+void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
+				 struct ww_acquire_ctx *acquire_ctx);
 int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 			    u32 handle, u64 *offset);
 int drm_gem_dumb_destroy(struct drm_file *file,
--
2.20.1