Message-Id: <20191226152922.2034-1-ebiggers@kernel.org>
Date: Thu, 26 Dec 2019 09:29:22 -0600
From: Eric Biggers <ebiggers@...nel.org>
To: linux-kernel@...r.kernel.org, Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Will Deacon <will@...nel.org>
Cc: Elena Reshetova <elena.reshetova@...el.com>,
Thomas Gleixner <tglx@...utronix.de>,
Kees Cook <keescook@...omium.org>,
Anna-Maria Gleixner <anna-maria@...utronix.de>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Subject: [PATCH] locking/refcount: add sparse annotations to dec-and-lock functions

From: Eric Biggers <ebiggers@...gle.com>

Wrap refcount_dec_and_lock() and refcount_dec_and_lock_irqsave() with
macros using __cond_lock() so that 'sparse' doesn't report warnings
about unbalanced locking when using them.
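
For context, when sparse runs (__CHECKER__ defined), __cond_lock() is
defined in include/linux/compiler_types.h as:

	# define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)

so sparse sees the lock as acquired exactly when the wrapped call
returns nonzero, which balances the unlock in the caller's success path.
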
This is the same thing that's done for their atomic_t equivalents.
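
For comparison, the existing annotation for the atomic_t version in
include/linux/spinlock.h looks like this:

	extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
	#define atomic_dec_and_lock(atomic, lock) \
			__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
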
Don't annotate refcount_dec_and_mutex_lock(), because mutexes don't
currently have sparse annotations.

Signed-off-by: Eric Biggers <ebiggers@...gle.com>
---
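Note for reviewers: a minimal caller sketch (hypothetical; the 'obj'
structure and its members are made up here) of the pattern that sparse
used to flag as a context imbalance and that these annotations fix:

	if (refcount_dec_and_lock(&obj->refcount, &obj->lock)) {
		list_del(&obj->list);
		spin_unlock(&obj->lock);
		kfree(obj);
	}
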
 include/linux/refcount.h | 45 ++++++++++++++++++++++++++++++++++++----
 lib/refcount.c           | 39 +++++-----------------------------
 2 files changed, 46 insertions(+), 38 deletions(-)

diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 0ac50cf62d06..6bb5ab9e98ed 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -300,8 +300,45 @@ static inline void refcount_dec(refcount_t *r)
extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
-extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
-extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
- spinlock_t *lock,
- unsigned long *flags);
+
+/**
+ * refcount_dec_and_lock - return holding spinlock if able to decrement
+ * refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ *
+ * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
+ * decrement when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ * otherwise
+ */
+extern __must_check bool _refcount_dec_and_lock(refcount_t *r,
+ spinlock_t *lock);
+#define refcount_dec_and_lock(r, lock) \
+ __cond_lock(lock, _refcount_dec_and_lock(r, lock))
+
+/**
+ * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
+ * interrupts if able to decrement refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ * @flags: saved IRQ-flags if the lock is acquired
+ *
+ * Same as refcount_dec_and_lock() above except that the spinlock is acquired
+ * with disabled interrupts.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ * otherwise
+ */
+extern __must_check bool _refcount_dec_and_lock_irqsave(refcount_t *r,
+ spinlock_t *lock,
+ unsigned long *flags);
+#define refcount_dec_and_lock_irqsave(r, lock, flags) \
+ __cond_lock(lock, _refcount_dec_and_lock_irqsave(r, lock, flags))
+
#endif /* _LINUX_REFCOUNT_H */
diff --git a/lib/refcount.c b/lib/refcount.c
index ebac8b7d15a7..f0eb996b28c0 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -125,23 +125,7 @@ bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
-/**
- * refcount_dec_and_lock - return holding spinlock if able to decrement
- * refcount to 0
- * @r: the refcount
- * @lock: the spinlock to be locked
- *
- * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
- * decrement when saturated at REFCOUNT_SATURATED.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides a control dependency such that free() must come after.
- * See the comment on top.
- *
- * Return: true and hold spinlock if able to decrement refcount to 0, false
- * otherwise
- */
-bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
+bool _refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
if (refcount_dec_not_one(r))
return false;
@@ -154,23 +138,10 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
return true;
}
-EXPORT_SYMBOL(refcount_dec_and_lock);
+EXPORT_SYMBOL(_refcount_dec_and_lock);
-/**
- * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
- * interrupts if able to decrement refcount to 0
- * @r: the refcount
- * @lock: the spinlock to be locked
- * @flags: saved IRQ-flags if the is acquired
- *
- * Same as refcount_dec_and_lock() above except that the spinlock is acquired
- * with disabled interupts.
- *
- * Return: true and hold spinlock if able to decrement refcount to 0, false
- * otherwise
- */
-bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
- unsigned long *flags)
+bool _refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
+ unsigned long *flags)
{
if (refcount_dec_not_one(r))
return false;
@@ -183,4 +154,4 @@ bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
return true;
}
-EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
+EXPORT_SYMBOL(_refcount_dec_and_lock_irqsave);
--
2.24.1