Message-ID: <20230827175449.1766701-11-dmitry.osipenko@collabora.com>
Date:   Sun, 27 Aug 2023 20:54:36 +0300
From:   Dmitry Osipenko <dmitry.osipenko@...labora.com>
To:     David Airlie <airlied@...il.com>,
        Gerd Hoffmann <kraxel@...hat.com>,
        Gurchetan Singh <gurchetansingh@...omium.org>,
        Chia-I Wu <olvaffe@...il.com>, Daniel Vetter <daniel@...ll.ch>,
        Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
        Maxime Ripard <mripard@...nel.org>,
        Thomas Zimmermann <tzimmermann@...e.de>,
        Christian König <christian.koenig@....com>,
        Qiang Yu <yuq825@...il.com>,
        Steven Price <steven.price@....com>,
        Boris Brezillon <boris.brezillon@...labora.com>,
        Emma Anholt <emma@...olt.net>, Melissa Wen <mwen@...lia.com>,
        Will Deacon <will@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Boqun Feng <boqun.feng@...il.com>,
        Mark Rutland <mark.rutland@....com>
Cc:     dri-devel@...ts.freedesktop.org, linux-kernel@...r.kernel.org,
        kernel@...labora.com, virtualization@...ts.linux-foundation.org,
        intel-gfx@...ts.freedesktop.org
Subject: [PATCH v15 10/23] locking/refcount, kref: Add kref_put_ww_mutex()

Introduce a kref_put_ww_mutex() helper that handles wait-wound mutex
auto-locking when kref_put() drops the last reference. This helper is
wanted by DRM drivers that make extensive use of dma-reservation locking,
which in turn uses ww-mutex.
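
For illustration only (not part of this patch), a minimal sketch of how a
driver-side object might use the new helper; the my_obj, my_obj_release()
and my_obj_put() names are hypothetical, and the embedded ww_mutex stands
in for the object's dma-resv lock:

/* Hypothetical usage sketch, assuming an object whose release must run
 * with its ww-mutex held.
 */
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

struct my_obj {
	struct kref refcount;
	struct ww_mutex lock;	/* stands in for the object's dma-resv lock */
};

static void my_obj_release(struct kref *kref)
{
	struct my_obj *obj = container_of(kref, struct my_obj, refcount);

	/* Invoked with obj->lock held; unlock before freeing the object
	 * that embeds the lock.
	 */
	ww_mutex_unlock(&obj->lock);
	kfree(obj);
}

static void my_obj_put(struct my_obj *obj, struct ww_acquire_ctx *ctx)
{
	/* The ww-mutex is taken only if the refcount is about to reach
	 * zero; on that path my_obj_release() runs with the lock held and
	 * is responsible for unlocking it.
	 */
	kref_put_ww_mutex(&obj->refcount, my_obj_release, &obj->lock, ctx);
}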

Signed-off-by: Dmitry Osipenko <dmitry.osipenko@...labora.com>
---
 include/linux/kref.h     | 12 ++++++++++++
 include/linux/refcount.h |  5 +++++
 lib/refcount.c           | 34 ++++++++++++++++++++++++++++++++++
 3 files changed, 51 insertions(+)

diff --git a/include/linux/kref.h b/include/linux/kref.h
index d32e21a2538c..b2d8dc6e9ae0 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -90,6 +90,18 @@ static inline int kref_put_lock(struct kref *kref,
 	return 0;
 }
 
+static inline int kref_put_ww_mutex(struct kref *kref,
+				    void (*release)(struct kref *kref),
+				    struct ww_mutex *lock,
+				    struct ww_acquire_ctx *ctx)
+{
+	if (refcount_dec_and_ww_mutex_lock(&kref->refcount, lock, ctx)) {
+		release(kref);
+		return 1;
+	}
+	return 0;
+}
+
 /**
  * kref_get_unless_zero - Increment refcount for object unless it is zero.
  * @kref: object.
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index a62fcca97486..be9ad272bc77 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -99,6 +99,8 @@
 #include <linux/spinlock_types.h>
 
 struct mutex;
+struct ww_mutex;
+struct ww_acquire_ctx;
 
 /**
  * typedef refcount_t - variant of atomic_t specialized for reference counts
@@ -366,4 +368,7 @@ extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
 						       spinlock_t *lock,
 						       unsigned long *flags) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_ww_mutex_lock(refcount_t *r,
+							struct ww_mutex *lock,
+							struct ww_acquire_ctx *ctx) __cond_acquires(&lock->base);
 #endif /* _LINUX_REFCOUNT_H */
diff --git a/lib/refcount.c b/lib/refcount.c
index a207a8f22b3c..3f6fd0ceed02 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -6,6 +6,7 @@
 #include <linux/mutex.h>
 #include <linux/refcount.h>
 #include <linux/spinlock.h>
+#include <linux/ww_mutex.h>
 #include <linux/bug.h>
 
 #define REFCOUNT_WARN(str)	WARN_ONCE(1, "refcount_t: " str ".\n")
@@ -184,3 +185,36 @@ bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
 	return true;
 }
 EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
+
+/**
+ * refcount_dec_and_ww_mutex_lock - return holding ww-mutex if able to
+ *                                  decrement refcount to 0
+ * @r: the refcount
+ * @lock: the ww-mutex to be locked
+ * @ctx: wait-wound context
+ *
+ * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
+ * decrement when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Return: true and hold ww-mutex lock if able to decrement refcount to 0,
+ *         false otherwise
+ */
+bool refcount_dec_and_ww_mutex_lock(refcount_t *r, struct ww_mutex *lock,
+				    struct ww_acquire_ctx *ctx)
+{
+	if (refcount_dec_not_one(r))
+		return false;
+
+	ww_mutex_lock(lock, ctx);
+	if (!refcount_dec_and_test(r)) {
+		ww_mutex_unlock(lock);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_ww_mutex_lock);
-- 
2.41.0
