Message-ID: <20251004020049.918665-3-kent.overstreet@linux.dev>
Date: Fri, 3 Oct 2025 22:00:44 -0400
From: Kent Overstreet <kent.overstreet@...ux.dev>
To: linux-kernel@...r.kernel.org
Cc: akpm@...ux-foundation.org,
	Kent Overstreet <kent.overstreet@...ux.dev>
Subject: [PATCH 2/7] closures: closure_sub() uses cmpxchg

Part one of fixing a race in __closure_sync_timeout().

lib/closure.c was originally designed around the assumption that the
refcount only hits 0 once, and that the thread which drops it to 0 then
owns the closure - which avoids many races without the need for cmpxchg.

But __closure_sync_timeout() broke this assumption: the timeout may
elapse, and the thread doing the sync may then race with the final put.

Switch to cmpxchg for manipulating cl->remaining; the next patch will fix
some issues where the other thread's closure_put() accesses members of
struct closure after that final put.
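
(Not part of the patch - a minimal userspace sketch of the
decide-then-cmpxchg pattern that the new closure_sub() uses, written with
C11 atomics.  The toy_* names are invented for illustration; the real code
operates on cl->remaining with atomic_try_cmpxchg_release() and the
CLOSURE_* flag bits.)

#include <stdatomic.h>
#include <stdio.h>

#define TOY_REMAINING_MASK	0x0000ffffu

struct toy_closure {
	_Atomic unsigned	remaining;
	/* invented stand-in for the real closure's fn/destructor */
	void			(*destructor)(struct toy_closure *);
};

static void toy_sub(struct toy_closure *cl, unsigned v)
{
	unsigned old = atomic_load_explicit(&cl->remaining, memory_order_relaxed);
	unsigned new;
	int last_ref;

	do {
		new = old - v;
		/*
		 * Decide what the value we are about to install means
		 * before publishing it: once the cmpxchg succeeds another
		 * thread may already be acting on the closure, so nothing
		 * may be decided by re-reading state afterwards.
		 */
		last_ref = !(new & TOY_REMAINING_MASK);
	} while (!atomic_compare_exchange_weak_explicit(&cl->remaining, &old, new,
							memory_order_release,
							memory_order_relaxed));

	if (last_ref && cl->destructor)
		cl->destructor(cl);
}

static void toy_last_put(struct toy_closure *cl)
{
	(void) cl;
	printf("final put: destructor runs exactly once\n");
}

int main(void)
{
	struct toy_closure cl = { .destructor = toy_last_put };

	atomic_store_explicit(&cl.remaining, 2, memory_order_relaxed);

	toy_sub(&cl, 1);	/* a ref is still held, nothing to do */
	toy_sub(&cl, 1);	/* final put */
	return 0;
}

The point mirrors the patch: a plain atomic sub can have two threads both
observe interesting final states, whereas the cmpxchg loop picks the action
for the value it is about to install, atomically with installing it.
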
Signed-off-by: Kent Overstreet <kent.overstreet@...ux.dev>
---
 include/linux/closure.h |  9 ++++-
 lib/closure.c           | 79 ++++++++++++++++++++++-------------------
 2 files changed, 50 insertions(+), 38 deletions(-)

diff --git a/include/linux/closure.h b/include/linux/closure.h
index a6fcc33fafce..f626044d6ca2 100644
--- a/include/linux/closure.h
+++ b/include/linux/closure.h
@@ -169,11 +169,18 @@ struct closure {
 };
 
 void closure_sub(struct closure *cl, int v);
-void closure_put(struct closure *cl);
 void __closure_wake_up(struct closure_waitlist *list);
 bool closure_wait(struct closure_waitlist *list, struct closure *cl);
 void __closure_sync(struct closure *cl);
 
+/*
+ * closure_put - decrement a closure's refcount
+ */
+static inline void closure_put(struct closure *cl)
+{
+	closure_sub(cl, 1);
+}
+
 static inline unsigned closure_nr_remaining(struct closure *cl)
 {
 	return atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK;
diff --git a/lib/closure.c b/lib/closure.c
index 5fafc8b0e99d..21fadd12093c 100644
--- a/lib/closure.c
+++ b/lib/closure.c
@@ -13,14 +13,14 @@
 #include <linux/seq_file.h>
 #include <linux/sched/debug.h>
 
-static void closure_val_checks(struct closure *cl, unsigned new)
+static void closure_val_checks(struct closure *cl, unsigned new, int d)
 {
 	unsigned count = new & CLOSURE_REMAINING_MASK;
 
 	if (WARN(new & CLOSURE_GUARD_MASK,
-		 "closure %ps has guard bits set: %x (%u)",
+		 "closure %ps has guard bits set: %x (%u), delta %i",
 		 cl->fn,
-		 new, (unsigned) __fls(new & CLOSURE_GUARD_MASK)))
+		 new, (unsigned) __fls(new & CLOSURE_GUARD_MASK), d))
 		new &= ~CLOSURE_GUARD_MASK;
 
 	WARN(!count && (new & ~CLOSURE_DESTRUCTOR),
@@ -29,49 +29,54 @@ static void closure_val_checks(struct closure *cl, unsigned new)
 	     new, (unsigned) __fls(new));
 }
 
-static inline void closure_put_after_sub(struct closure *cl, int flags)
-{
-	closure_val_checks(cl, flags);
+enum new_closure_state {
+	CLOSURE_normal_put,
+	CLOSURE_requeue,
+	CLOSURE_done,
+};
 
-	if (!(flags & CLOSURE_REMAINING_MASK)) {
-		smp_acquire__after_ctrl_dep();
+/* For clearing flags with the same atomic op as a put */
+void closure_sub(struct closure *cl, int v)
+{
+	enum new_closure_state s;
 
-		cl->closure_get_happened = false;
+	int old = atomic_read(&cl->remaining), new;
+	do {
+		new = old - v;
 
-		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
-			atomic_set(&cl->remaining,
-				   CLOSURE_REMAINING_INITIALIZER);
-			closure_queue(cl);
+		if (new & CLOSURE_REMAINING_MASK) {
+			s = CLOSURE_normal_put;
 		} else {
-			struct closure *parent = cl->parent;
-			closure_fn *destructor = cl->fn;
+			if (cl->fn && !(new & CLOSURE_DESTRUCTOR)) {
+				s = CLOSURE_requeue;
+				new += CLOSURE_REMAINING_INITIALIZER;
+			} else
+				s = CLOSURE_done;
+		}
 
-			closure_debug_destroy(cl);
+		closure_val_checks(cl, new, -v);
+	} while (!atomic_try_cmpxchg_release(&cl->remaining, &old, new));
 
-			if (destructor)
-				destructor(&cl->work);
+	if (s == CLOSURE_normal_put)
+		return;
 
-			if (parent)
-				closure_put(parent);
-		}
-	}
-}
+	if (s == CLOSURE_requeue) {
+		cl->closure_get_happened = false;
+		closure_queue(cl);
+	} else {
+		struct closure *parent = cl->parent;
+		closure_fn *destructor = cl->fn;
 
-/* For clearing flags with the same atomic op as a put */
-void closure_sub(struct closure *cl, int v)
-{
-	closure_put_after_sub(cl, atomic_sub_return_release(v, &cl->remaining));
-}
-EXPORT_SYMBOL(closure_sub);
+		closure_debug_destroy(cl);
 
-/*
- * closure_put - decrement a closure's refcount
- */
-void closure_put(struct closure *cl)
-{
-	closure_put_after_sub(cl, atomic_dec_return_release(&cl->remaining));
+		if (destructor)
+			destructor(&cl->work);
+
+		if (parent)
+			closure_put(parent);
+	}
 }
-EXPORT_SYMBOL(closure_put);
+EXPORT_SYMBOL(closure_sub);
 
 /*
  * closure_wake_up - wake up all closures on a wait list, without memory barrier
@@ -169,7 +174,7 @@ void __sched closure_return_sync(struct closure *cl)
 	unsigned flags = atomic_sub_return_release(1 + CLOSURE_RUNNING - CLOSURE_DESTRUCTOR,
						   &cl->remaining);
 
-	closure_val_checks(cl, flags);
+	closure_val_checks(cl, flags, 1 + CLOSURE_RUNNING - CLOSURE_DESTRUCTOR);
 
 	if (unlikely(flags & CLOSURE_REMAINING_MASK)) {
 		while (1) {
--
2.51.0