Message-Id: <20231103161635.1902667-1-aahringo@redhat.com>
Date: Fri, 3 Nov 2023 12:16:34 -0400
From: Alexander Aring <aahringo@...hat.com>
To: will@...nel.org
Cc: gfs2@...ts.linux.dev, aahringo@...hat.com, peterz@...radead.org,
boqun.feng@...il.com, mark.rutland@....com,
linux-kernel@...r.kernel.org
Subject: [RFC 1/2] refcount: introduce generic lockptr funcs
This patch introduces lockptr refcount operations. The refcount
implementation currently provides refcount_dec_and_lock() functionality
for the most commonly used lock types. Those functions all look nearly
identical, so the same logic is duplicated throughout the refcount
implementation. Instead of introducing yet another complete
refcount_dec_and_lock() implementation, e.g. for rwlock_t and its _bh
variants, this patch introduces lockptr. A lockptr is just a void *
that refers to the actual lock instance, which can even be a custom
locking type. Through the passed lock and unlock callbacks, the void
*lockptr becomes the real thing: each callback casts it and performs
the locktype-specific lock operation. Lock operations that need extra
state, such as the irqsave flags, pass a small carrier struct (see
lockptr_irqsave_data) as the lockptr instead.
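For illustration only (not part of this patch), a caller could build a
write-locked rwlock_t variant on top of the new helper; the example_*
names below are hypothetical:

	static void example_write_lock(void *lockptr)
	{
		/* lockptr carries the rwlock_t passed in below */
		write_lock((rwlock_t *)lockptr);
	}

	static void example_write_unlock(void *lockptr)
	{
		write_unlock((rwlock_t *)lockptr);
	}

	static bool example_dec_and_write_lock(refcount_t *r, rwlock_t *lock)
	{
		/* true: the refcount hit 0 and the write lock is held;
		 * false: the count was merely decremented and no lock
		 * is held on return.
		 */
		return refcount_dec_and_lockptr(r, example_write_lock,
						example_write_unlock, lock);
	}

The same pattern would cover the _bh variants by calling
spin_lock_bh()/spin_unlock_bh() from the callbacks.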
Signed-off-by: Alexander Aring <aahringo@...hat.com>
---
include/linux/refcount.h | 15 +++++++
lib/refcount.c | 92 ++++++++++++++++++++++++++++------------
2 files changed, 80 insertions(+), 27 deletions(-)
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index a62fcca97486..7b1fb85212cc 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -366,4 +366,19 @@ extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
spinlock_t *lock,
unsigned long *flags) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_lockptr(refcount_t *r, void (*lock)(void *lockptr),
+		void (*unlock)(void *lockptr), void *lockptr) __cond_acquires(lockptr);
+
+extern void lockptr_mutex_lock(void *lockptr) __acquires(lockptr);
+extern void lockptr_mutex_unlock(void *lockptr) __releases(lockptr);
+extern void lockptr_spin_lock(void *lockptr) __acquires(lockptr);
+extern void lockptr_spin_unlock(void *lockptr) __releases(lockptr);
+
+struct lockptr_irqsave_data {
+ void *lockptr;
+ unsigned long *flags;
+};
+extern void lockptr_lock_irqsave(void *lockptr) __acquires(lockptr);
+extern void lockptr_unlock_irqsave(void *lockptr) __releases(lockptr);
+
#endif /* _LINUX_REFCOUNT_H */
diff --git a/lib/refcount.c b/lib/refcount.c
index a207a8f22b3c..e28678f0f473 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -94,6 +94,34 @@ bool refcount_dec_not_one(refcount_t *r)
}
EXPORT_SYMBOL(refcount_dec_not_one);
+bool refcount_dec_and_lockptr(refcount_t *r, void (*lock)(void *lockptr),
+ void (*unlock)(void *lockptr), void *lockptr)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ lock(lockptr);
+ if (!refcount_dec_and_test(r)) {
+ unlock(lockptr);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lockptr);
+
+void lockptr_mutex_lock(void *lockptr)
+{
+ mutex_lock(lockptr);
+}
+EXPORT_SYMBOL(lockptr_mutex_lock);
+
+void lockptr_mutex_unlock(void *lockptr)
+{
+ mutex_unlock(lockptr);
+}
+EXPORT_SYMBOL(lockptr_mutex_unlock);
+
/**
* refcount_dec_and_mutex_lock - return holding mutex if able to decrement
* refcount to 0
@@ -112,18 +140,22 @@ EXPORT_SYMBOL(refcount_dec_not_one);
*/
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
- if (refcount_dec_not_one(r))
- return false;
+ return refcount_dec_and_lockptr(r, lockptr_mutex_lock,
+ lockptr_mutex_unlock, lock);
+}
+EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
- mutex_lock(lock);
- if (!refcount_dec_and_test(r)) {
- mutex_unlock(lock);
- return false;
- }
+void lockptr_spin_lock(void *lockptr)
+{
+ spin_lock(lockptr);
+}
+EXPORT_SYMBOL(lockptr_spin_lock);
- return true;
+void lockptr_spin_unlock(void *lockptr)
+{
+ spin_unlock(lockptr);
}
-EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
+EXPORT_SYMBOL(lockptr_spin_unlock);
/**
* refcount_dec_and_lock - return holding spinlock if able to decrement
@@ -143,18 +175,26 @@ EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
*/
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
- if (refcount_dec_not_one(r))
- return false;
+ return refcount_dec_and_lockptr(r, lockptr_spin_lock,
+ lockptr_spin_unlock, lock);
+}
+EXPORT_SYMBOL(refcount_dec_and_lock);
- spin_lock(lock);
- if (!refcount_dec_and_test(r)) {
- spin_unlock(lock);
- return false;
- }
+void lockptr_lock_irqsave(void *lockptr)
+{
+ struct lockptr_irqsave_data *d = lockptr;
- return true;
+ spin_lock_irqsave(d->lockptr, *d->flags);
}
-EXPORT_SYMBOL(refcount_dec_and_lock);
+EXPORT_SYMBOL(lockptr_lock_irqsave);
+
+void lockptr_unlock_irqsave(void *lockptr)
+{
+ struct lockptr_irqsave_data *d = lockptr;
+
+ spin_unlock_irqrestore(d->lockptr, *d->flags);
+}
+EXPORT_SYMBOL(lockptr_unlock_irqsave);
/**
* refcount_dec_and_lock_irqsave - return holding spinlock with disabled
@@ -172,15 +212,13 @@ EXPORT_SYMBOL(refcount_dec_and_lock);
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
unsigned long *flags)
{
- if (refcount_dec_not_one(r))
- return false;
+ struct lockptr_irqsave_data d = {
+ .lockptr = lock,
+ .flags = flags,
+ };
- spin_lock_irqsave(lock, *flags);
- if (!refcount_dec_and_test(r)) {
- spin_unlock_irqrestore(lock, *flags);
- return false;
- }
-
- return true;
+ return refcount_dec_and_lockptr(r, lockptr_lock_irqsave,
+ lockptr_unlock_irqsave, &d);
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
+
--
2.39.3