Date:   Tue, 25 Jun 2019 15:55:06 +0200
From:   "Christian König" 
        <ckoenig.leichtzumerken@...il.com>
To:     daniel@...ll.ch, peterz@...radead.org,
        dri-devel@...ts.freedesktop.org, linux-kernel@...r.kernel.org
Subject: [PATCH 1/2] dma-buf: add reservation_context for deadlock handling

The ww_mutex framework allows detecting deadlocks when multiple
threads try to acquire the same set of locks in a different order.

The problem is that handling those deadlocks was left to the users of
the ww_mutex implementation, and at least some users didn't get that
right on the first try.

So introduce a new reservation_context object which can be used to
simplify the deadlock handling. This is done by tracking all locked
reservation objects in the context as well as the last contended
reservation object.

When a deadlock occurs we now unlock all previously locked objects and
acquire the contended lock in the slow path. After that is done, -EDEADLK
is still returned to signal that all other locks need to be re-acquired.
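
To illustrate the intended calling pattern (this sketch is not part of
the patch; the helper name and the two objects "a" and "b" are made up
for illustration), a user of the new context would look roughly like
this:

/*
 * Usage sketch only, based on the description above: lock two
 * hypothetical reservation objects with a context and retry on
 * -EDEADLK until both are held.
 */
static int example_lock_pair(struct reservation_object *a,
			     struct reservation_object *b,
			     bool interruptible)
{
	struct reservation_context ctx;
	int ret;

	reservation_context_init(&ctx);
retry:
	ret = reservation_context_lock(&ctx, a, interruptible);
	if (ret == -EDEADLK)
		goto retry;	/* contended lock is now held, re-take the rest */
	if (ret)
		goto out;

	ret = reservation_context_lock(&ctx, b, interruptible);
	if (ret == -EDEADLK)
		goto retry;
	if (ret)
		goto out;

	reservation_context_done(&ctx);

	/* ... update the reservation objects here ... */

out:
	reservation_context_unlock_all(&ctx);
	reservation_context_fini(&ctx);
	return ret;
}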

Signed-off-by: Christian König <christian.koenig@....com>
---
 drivers/dma-buf/reservation.c | 82 +++++++++++++++++++++++++++++++++++
 include/linux/reservation.h   | 38 ++++++++++++++++
 2 files changed, 120 insertions(+)

diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 4d32e2c67862..9e53e42b053a 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -55,6 +55,88 @@ EXPORT_SYMBOL(reservation_seqcount_class);
 const char reservation_seqcount_string[] = "reservation_seqcount";
 EXPORT_SYMBOL(reservation_seqcount_string);
 
+/**
+ * reservation_context_init - initialize a reservation context
+ * @ctx: the context to initialize
+ *
+ * Start using this reservation context to lock reservation objects for update.
+ */
+void reservation_context_init(struct reservation_context *ctx)
+{
+	ww_acquire_init(&ctx->ctx, &reservation_ww_class);
+	init_llist_head(&ctx->locked);
+	ctx->contended = NULL;
+}
+EXPORT_SYMBOL(reservation_context_init);
+
+/**
+ * reservation_context_unlock_all - unlock all reservation objects
+ * @ctx: the context which holds the reservation objects
+ *
+ * Unlocks all reservation objects locked with this context.
+ */
+void reservation_context_unlock_all(struct reservation_context *ctx)
+{
+	struct reservation_object *obj, *next;
+
+	if (ctx->contended)
+		ww_mutex_unlock(&ctx->contended->lock);
+	ctx->contended = NULL;
+
+	llist_for_each_entry_safe(obj, next, ctx->locked.first, locked)
+		ww_mutex_unlock(&obj->lock);
+	init_llist_head(&ctx->locked);
+}
+EXPORT_SYMBOL(reservation_context_unlock_all);
+
+/**
+ * reservation_context_lock - lock a reservation object with deadlock handling
+ * @ctx: the context which should be used to lock the object
+ * @obj: the object which needs to be locked
+ * @interruptible: whether to wait interruptibly or not
+ *
+ * Use @ctx to lock the reservation object. If a deadlock is detected we back off
+ * by releasing all locked objects and use the slow path to lock the reservation
+ * object. After successfully locking in the slow path -EDEADLK is returned to
+ * signal that all other locks must be re-taken as well.
+ */
+int reservation_context_lock(struct reservation_context *ctx,
+			     struct reservation_object *obj,
+			     bool interruptible)
+{
+	int ret = 0;
+
+	if (unlikely(ctx->contended == obj))
+		ctx->contended = NULL;
+	else if (interruptible)
+		ret = ww_mutex_lock_interruptible(&obj->lock, &ctx->ctx);
+	else
+		ret = ww_mutex_lock(&obj->lock, &ctx->ctx);
+
+	if (likely(!ret)) {
+		/* don't use llist_add here, we have separate locking */
+		obj->locked.next = ctx->locked.first;
+		ctx->locked.first = &obj->locked;
+		return 0;
+	}
+	if (unlikely(ret != -EDEADLK))
+		return ret;
+
+	reservation_context_unlock_all(ctx);
+
+	if (interruptible) {
+		ret = ww_mutex_lock_slow_interruptible(&obj->lock, &ctx->ctx);
+		if (unlikely(ret))
+			return ret;
+	} else {
+		ww_mutex_lock_slow(&obj->lock, &ctx->ctx);
+	}
+
+	ctx->contended = obj;
+	return -EDEADLK;
+}
+EXPORT_SYMBOL(reservation_context_lock);
+
 /**
  * reservation_object_reserve_shared - Reserve space to add shared fences to
  * a reservation_object.
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index ee750765cc94..a8a52e5d3e80 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -44,11 +44,48 @@
 #include <linux/slab.h>
 #include <linux/seqlock.h>
 #include <linux/rcupdate.h>
+#include <linux/llist.h>
 
 extern struct ww_class reservation_ww_class;
 extern struct lock_class_key reservation_seqcount_class;
 extern const char reservation_seqcount_string[];
 
+/**
+ * struct reservation_context - context to lock reservation objects
+ * @ctx: ww_acquire_ctx used for deadlock detection
+ * @locked: list of reservation objects locked in this context
+ * @contended: contended reservation object
+ */
+struct reservation_context {
+	struct ww_acquire_ctx ctx;
+	struct llist_head locked;
+	struct reservation_object *contended;
+};
+
+/**
+ * reservation_context_done - wrapper for ww_acquire_done
+ * @ctx: the reservation context which is done with locking
+ */
+static inline void reservation_context_done(struct reservation_context *ctx)
+{
+	ww_acquire_done(&ctx->ctx);
+}
+
+/**
+ * reservation_context_fini - wrapper for ww_acquire_fini
+ * @ctx: the reservation context which is finished
+ */
+static inline void reservation_context_fini(struct reservation_context *ctx)
+{
+	ww_acquire_fini(&ctx->ctx);
+}
+
+void reservation_context_init(struct reservation_context *ctx);
+void reservation_context_unlock_all(struct reservation_context *ctx);
+int reservation_context_lock(struct reservation_context *ctx,
+			     struct reservation_object *obj,
+			     bool interruptible);
+
 /**
  * struct reservation_object_list - a list of shared fences
  * @rcu: for internal use
@@ -71,6 +108,7 @@ struct reservation_object_list {
  */
 struct reservation_object {
 	struct ww_mutex lock;
+	struct llist_node locked;
 	seqcount_t seq;
 
 	struct dma_fence __rcu *fence_excl;
-- 
2.17.1
