Message-Id: <20231106191138.3179599-1-aahringo@redhat.com>
Date: Mon, 6 Nov 2023 14:11:36 -0500
From: Alexander Aring <aahringo@...hat.com>
To: peterz@...radead.org
Cc: will@...nel.org, gfs2@...ts.linux.dev, aahringo@...hat.com,
boqun.feng@...il.com, mark.rutland@....com,
linux-kernel@...r.kernel.org
Subject: [PATCH 1/3] refcount: move kdoc to header definition

Move the kernel-doc comments for the refcount_dec_and_lock() family of
helpers to their prototype declarations in the header.

Signed-off-by: Alexander Aring <aahringo@...hat.com>
---
 include/linux/refcount.h | 48 ++++++++++++++++++++++++++++++++++++++++
 lib/refcount.c           | 45 -------------------------------------
2 files changed, 48 insertions(+), 45 deletions(-)
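
Not part of the patch, just an illustrative note for reviewers: a minimal
usage sketch of refcount_dec_and_lock() as described by the kernel-doc
being moved below. The struct my_obj, my_obj_put() and my_obj_lock names
are made up for illustration only.

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	refcount_t ref;
	struct list_head node;
};

static DEFINE_SPINLOCK(my_obj_lock);

static void my_obj_put(struct my_obj *obj)
{
	/*
	 * refcount_dec_and_lock() returns true with my_obj_lock held
	 * only when the refcount dropped to zero; the object can then
	 * be unlinked and freed once the lock is released.
	 */
	if (refcount_dec_and_lock(&obj->ref, &my_obj_lock)) {
		list_del(&obj->node);
		spin_unlock(&my_obj_lock);
		kfree(obj);
	}
}

The refcount_dec_and_lock_irqsave() variant follows the same pattern but
additionally takes an unsigned long *flags and is paired with
spin_unlock_irqrestore(); refcount_dec_and_mutex_lock() takes a
struct mutex * and is paired with mutex_unlock().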
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index a62fcca97486..741cc6295f54 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -361,8 +361,56 @@ static inline void refcount_dec(refcount_t *r)
extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
+
+/**
+ * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
+ * refcount to 0
+ * @r: the refcount
+ * @lock: the mutex to be locked
+ *
+ * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
+ * to decrement when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Return: true and hold mutex if able to decrement refcount to 0, false
+ * otherwise
+ */
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock);
+
+/**
+ * refcount_dec_and_lock - return holding spinlock if able to decrement
+ * refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ *
+ * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
+ * decrement when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ * otherwise
+ */
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock);
+
+/**
+ * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
+ * interrupts if able to decrement refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ * @flags: saved IRQ-flags if the lock is acquired
+ *
+ * Same as refcount_dec_and_lock() above except that the spinlock is acquired
+ * with disabled interrupts.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ * otherwise
+ */
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
spinlock_t *lock,
unsigned long *flags) __cond_acquires(lock);
diff --git a/lib/refcount.c b/lib/refcount.c
index a207a8f22b3c..c37edf66994f 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -94,22 +94,6 @@ bool refcount_dec_not_one(refcount_t *r)
}
EXPORT_SYMBOL(refcount_dec_not_one);
-/**
- * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
- * refcount to 0
- * @r: the refcount
- * @lock: the mutex to be locked
- *
- * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
- * to decrement when saturated at REFCOUNT_SATURATED.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides a control dependency such that free() must come after.
- * See the comment on top.
- *
- * Return: true and hold mutex if able to decrement refcount to 0, false
- * otherwise
- */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
if (refcount_dec_not_one(r))
@@ -125,22 +109,6 @@ bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
-/**
- * refcount_dec_and_lock - return holding spinlock if able to decrement
- * refcount to 0
- * @r: the refcount
- * @lock: the spinlock to be locked
- *
- * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
- * decrement when saturated at REFCOUNT_SATURATED.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides a control dependency such that free() must come after.
- * See the comment on top.
- *
- * Return: true and hold spinlock if able to decrement refcount to 0, false
- * otherwise
- */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
if (refcount_dec_not_one(r))
@@ -156,19 +124,6 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
}
EXPORT_SYMBOL(refcount_dec_and_lock);
-/**
- * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
- * interrupts if able to decrement refcount to 0
- * @r: the refcount
- * @lock: the spinlock to be locked
- * @flags: saved IRQ-flags if the is acquired
- *
- * Same as refcount_dec_and_lock() above except that the spinlock is acquired
- * with disabled interrupts.
- *
- * Return: true and hold spinlock if able to decrement refcount to 0, false
- * otherwise
- */
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
unsigned long *flags)
{
--
2.39.3