[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20230828112604.297db23a@collabora.com>
Date: Mon, 28 Aug 2023 11:26:04 +0200
From: Boris Brezillon <boris.brezillon@...labora.com>
To: Dmitry Osipenko <dmitry.osipenko@...labora.com>
Cc: David Airlie <airlied@...il.com>,
Gerd Hoffmann <kraxel@...hat.com>,
Gurchetan Singh <gurchetansingh@...omium.org>,
Chia-I Wu <olvaffe@...il.com>, Daniel Vetter <daniel@...ll.ch>,
Maarten Lankhorst <maarten.lankhorst@...ux.intel.com>,
Maxime Ripard <mripard@...nel.org>,
Thomas Zimmermann <tzimmermann@...e.de>,
Christian König <christian.koenig@....com>,
Qiang Yu <yuq825@...il.com>,
Steven Price <steven.price@....com>,
Emma Anholt <emma@...olt.net>, Melissa Wen <mwen@...lia.com>,
Will Deacon <will@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Boqun Feng <boqun.feng@...il.com>,
Mark Rutland <mark.rutland@....com>,
dri-devel@...ts.freedesktop.org, linux-kernel@...r.kernel.org,
kernel@...labora.com, virtualization@...ts.linux-foundation.org,
intel-gfx@...ts.freedesktop.org
Subject: Re: [PATCH v15 10/23] locking/refcount, kref: Add
kref_put_ww_mutex()
On Sun, 27 Aug 2023 20:54:36 +0300
Dmitry Osipenko <dmitry.osipenko@...labora.com> wrote:
> Introduce kref_put_ww_mutex() helper that will handle the wait-wound
> mutex auto-locking on kref_put(). This helper is wanted by DRM drivers
> that extensively use dma-reservation locking which in turn uses ww-mutex.
>
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@...labora.com>
> ---
> include/linux/kref.h | 12 ++++++++++++
> include/linux/refcount.h | 5 +++++
> lib/refcount.c | 34 ++++++++++++++++++++++++++++++++++
> 3 files changed, 51 insertions(+)
>
> diff --git a/include/linux/kref.h b/include/linux/kref.h
> index d32e21a2538c..b2d8dc6e9ae0 100644
> --- a/include/linux/kref.h
> +++ b/include/linux/kref.h
> @@ -90,6 +90,18 @@ static inline int kref_put_lock(struct kref *kref,
> return 0;
> }
>
> +static inline int kref_put_ww_mutex(struct kref *kref,
> + void (*release)(struct kref *kref),
> + struct ww_mutex *lock,
> + struct ww_acquire_ctx *ctx)
> +{
> + if (refcount_dec_and_ww_mutex_lock(&kref->refcount, lock, ctx)) {
> + release(kref);
> + return 1;
> + }
> + return 0;
> +}
> +
> /**
> * kref_get_unless_zero - Increment refcount for object unless it is zero.
> * @kref: object.
> diff --git a/include/linux/refcount.h b/include/linux/refcount.h
> index a62fcca97486..be9ad272bc77 100644
> --- a/include/linux/refcount.h
> +++ b/include/linux/refcount.h
> @@ -99,6 +99,8 @@
> #include <linux/spinlock_types.h>
>
> struct mutex;
> +struct ww_mutex;
> +struct ww_acquire_ctx;
>
> /**
> * typedef refcount_t - variant of atomic_t specialized for reference counts
> @@ -366,4 +368,7 @@ extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
> extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
> spinlock_t *lock,
> unsigned long *flags) __cond_acquires(lock);
> +extern __must_check bool refcount_dec_and_ww_mutex_lock(refcount_t *r,
> + struct ww_mutex *lock,
> + struct ww_acquire_ctx *ctx) __cond_acquires(&lock->base);
> #endif /* _LINUX_REFCOUNT_H */
> diff --git a/lib/refcount.c b/lib/refcount.c
> index a207a8f22b3c..3f6fd0ceed02 100644
> --- a/lib/refcount.c
> +++ b/lib/refcount.c
> @@ -6,6 +6,7 @@
> #include <linux/mutex.h>
> #include <linux/refcount.h>
> #include <linux/spinlock.h>
> +#include <linux/ww_mutex.h>
> #include <linux/bug.h>
>
> #define REFCOUNT_WARN(str) WARN_ONCE(1, "refcount_t: " str ".\n")
> @@ -184,3 +185,36 @@ bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
> return true;
> }
> EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
> +
> +/**
> + * refcount_dec_and_ww_mutex_lock - return holding ww-mutex if able to
> + * decrement refcount to 0
> + * @r: the refcount
> + * @lock: the ww-mutex to be locked
> + * @ctx: wait-wound context
> + *
> + * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
> + * decrement when saturated at REFCOUNT_SATURATED.
> + *
> + * Provides release memory ordering, such that prior loads and stores are done
> + * before, and provides a control dependency such that free() must come after.
> + * See the comment on top.
> + *
> + * Return: true and hold ww-mutex lock if able to decrement refcount to 0,
> + * false otherwise
> + */
> +bool refcount_dec_and_ww_mutex_lock(refcount_t *r, struct ww_mutex *lock,
> + struct ww_acquire_ctx *ctx)
> +{
> + if (refcount_dec_not_one(r))
> + return false;
> +
> + ww_mutex_lock(lock, ctx);
Unless I'm wrong, ww_mutex_lock() can return -EDEADLK when ctx !=
NULL, in which case the lock is not held when it returns. The question
is: do we really have a use case for ctx != NULL in that
kref_put_ww_mutex() path? If we need to acquire other ww_locks, this
lock and the other locks should have been acquired beforehand, and we
can simply call kref_put() when we want to release the ref on the
resource.
> + if (!refcount_dec_and_test(r)) {
> + ww_mutex_unlock(lock);
> + return false;
> + }
> +
> + return true;
> +}
> +EXPORT_SYMBOL(refcount_dec_and_ww_mutex_lock);
Powered by blists - more mailing lists