Message-ID: <176173057318.2601451.3711090924055288815.tip-bot2@tip-bot2>
Date: Wed, 29 Oct 2025 09:36:13 -0000
From: "tip-bot2 for Peter Zijlstra" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: "Peter Zijlstra (Intel)" <peterz@...radead.org>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: perf/core] unwind: Make unwind_task_info::unwind_mask consistent
The following commit has been merged into the perf/core branch of tip:
Commit-ID: 639214f65b1db87c6992eadf93079ff0d8768c2d
Gitweb: https://git.kernel.org/tip/639214f65b1db87c6992eadf93079ff0d8768c2d
Author: Peter Zijlstra <peterz@...radead.org>
AuthorDate: Mon, 22 Sep 2025 16:09:17 +02:00
Committer: Peter Zijlstra <peterz@...radead.org>
CommitterDate: Wed, 29 Oct 2025 10:29:57 +01:00
unwind: Make unwind_task_info::unwind_mask consistent
The unwind_task_info::unwind_mask was manipulated using a mixture of:

 - regular store
 - WRITE_ONCE()
 - try_cmpxchg()
 - set_bit()
 - atomic_long_*()

Clean up and make it consistently atomic_long_t.
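
For illustration, the conversions map the old accessors onto the
atomic_long_t API roughly as follows (a summary sketch distilled from
the diff below):

  info->unwind_mask = 0 / WRITE_ONCE(info->unwind_mask, 0)
    -> atomic_long_set(&info->unwind_mask, 0)

  READ_ONCE(info->unwind_mask)
    -> atomic_long_read(&info->unwind_mask)

  test_bit(UNWIND_PENDING_BIT, &info->unwind_mask)
    -> atomic_long_read(&info->unwind_mask) & UNWIND_PENDING

  set_bit(UNWIND_USED_BIT, &info->unwind_mask)
    -> atomic_long_or(UNWIND_USED, &info->unwind_mask)

  clear_bit(bit, &info->unwind_mask)
    -> atomic_long_andnot(BIT(bit), &info->unwind_mask)

  try_cmpxchg(&info->unwind_mask, &bits, 0UL)
    -> atomic_long_try_cmpxchg(&info->unwind_mask, &bits, 0UL)

The existing atomic_long_fetch_or()/atomic_long_fetch_andnot() callers
keep their operations but drop the (atomic_long_t *) casts, since the
field now has the right type.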
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Link: https://patch.msgid.link/20250924080119.384384486@infradead.org
---
include/linux/unwind_deferred.h | 4 ++--
include/linux/unwind_deferred_types.h | 3 ++-
kernel/unwind/deferred.c | 17 +++++++++--------
3 files changed, 13 insertions(+), 11 deletions(-)
diff --git a/include/linux/unwind_deferred.h b/include/linux/unwind_deferred.h
index 196e12c..f4743c8 100644
--- a/include/linux/unwind_deferred.h
+++ b/include/linux/unwind_deferred.h
@@ -46,7 +46,7 @@ void unwind_deferred_task_exit(struct task_struct *task);
static __always_inline void unwind_reset_info(void)
{
struct unwind_task_info *info = &current->unwind_info;
- unsigned long bits = info->unwind_mask;
+ unsigned long bits = atomic_long_read(&info->unwind_mask);
/* Was there any unwinding? */
if (likely(!bits))
@@ -56,7 +56,7 @@ static __always_inline void unwind_reset_info(void)
/* Is a task_work going to run again before going back */
if (bits & UNWIND_PENDING)
return;
- } while (!try_cmpxchg(&info->unwind_mask, &bits, 0UL));
+ } while (!atomic_long_try_cmpxchg(&info->unwind_mask, &bits, 0UL));
current->unwind_info.id.id = 0;
if (unlikely(info->cache)) {
diff --git a/include/linux/unwind_deferred_types.h b/include/linux/unwind_deferred_types.h
index 29452ff..0a4c8dd 100644
--- a/include/linux/unwind_deferred_types.h
+++ b/include/linux/unwind_deferred_types.h
@@ -3,6 +3,7 @@
#define _LINUX_UNWIND_USER_DEFERRED_TYPES_H
#include <linux/types.h>
+#include <linux/atomic.h>
struct unwind_cache {
unsigned long unwind_completed;
@@ -32,7 +33,7 @@ union unwind_task_id {
};
struct unwind_task_info {
- unsigned long unwind_mask;
+ atomic_long_t unwind_mask;
struct unwind_cache *cache;
struct callback_head work;
union unwind_task_id id;
diff --git a/kernel/unwind/deferred.c b/kernel/unwind/deferred.c
index 09617d8..a88fb48 100644
--- a/kernel/unwind/deferred.c
+++ b/kernel/unwind/deferred.c
@@ -53,7 +53,7 @@ DEFINE_STATIC_SRCU(unwind_srcu);
static inline bool unwind_pending(struct unwind_task_info *info)
{
- return test_bit(UNWIND_PENDING_BIT, &info->unwind_mask);
+ return atomic_long_read(&info->unwind_mask) & UNWIND_PENDING;
}
/*
@@ -141,7 +141,7 @@ int unwind_user_faultable(struct unwind_stacktrace *trace)
cache->nr_entries = trace->nr;
/* Clear nr_entries on way back to user space */
- set_bit(UNWIND_USED_BIT, &info->unwind_mask);
+ atomic_long_or(UNWIND_USED, &info->unwind_mask);
return 0;
}
@@ -159,7 +159,7 @@ static void process_unwind_deferred(struct task_struct *task)
/* Clear pending bit but make sure to have the current bits */
bits = atomic_long_fetch_andnot(UNWIND_PENDING,
- (atomic_long_t *)&info->unwind_mask);
+ &info->unwind_mask);
/*
* From here on out, the callback must always be called, even if it's
* just an empty trace.
@@ -264,7 +264,7 @@ int unwind_deferred_request(struct unwind_work *work, u64 *cookie)
*cookie = get_cookie(info);
- old = READ_ONCE(info->unwind_mask);
+ old = atomic_long_read(&info->unwind_mask);
/* Is this already queued or executed */
if (old & bit)
@@ -277,7 +277,7 @@ int unwind_deferred_request(struct unwind_work *work, u64 *cookie)
* to have a callback.
*/
bits = UNWIND_PENDING | bit;
- old = atomic_long_fetch_or(bits, (atomic_long_t *)&info->unwind_mask);
+ old = atomic_long_fetch_or(bits, &info->unwind_mask);
if (old & bits) {
/*
* If the work's bit was set, whatever set it had better
@@ -291,7 +291,7 @@ int unwind_deferred_request(struct unwind_work *work, u64 *cookie)
ret = task_work_add(current, &info->work, twa_mode);
if (WARN_ON_ONCE(ret))
- WRITE_ONCE(info->unwind_mask, 0);
+ atomic_long_set(&info->unwind_mask, 0);
return ret;
}
@@ -323,7 +323,8 @@ void unwind_deferred_cancel(struct unwind_work *work)
guard(rcu)();
/* Clear this bit from all threads */
for_each_process_thread(g, t) {
- clear_bit(bit, &t->unwind_info.unwind_mask);
+ atomic_long_andnot(BIT(bit),
+ &t->unwind_info.unwind_mask);
if (t->unwind_info.cache)
clear_bit(bit, &t->unwind_info.cache->unwind_completed);
}
@@ -353,7 +354,7 @@ void unwind_task_init(struct task_struct *task)
memset(info, 0, sizeof(*info));
init_task_work(&info->work, unwind_deferred_task_work);
- info->unwind_mask = 0;
+ atomic_long_set(&info->unwind_mask, 0);
}
void unwind_task_free(struct task_struct *task)
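
For readers outside the kernel tree, here is a minimal userspace model
of the unwind_reset_info() path above, written with C11 atomics in
place of the kernel's atomic_long_t API. This is purely a sketch: the
bit values and the standalone setup are assumptions, not taken from
the kernel headers.

	/*
	 * Userspace model of the unwind_mask reset path.  C11's
	 * atomic_compare_exchange_weak() stands in for the kernel's
	 * atomic_long_try_cmpxchg(); on failure it reloads 'bits',
	 * just as the kernel loop relies on.
	 */
	#include <stdatomic.h>
	#include <stdio.h>

	#define UNWIND_PENDING	(1UL << 0)	/* assumed bit layout */
	#define UNWIND_USED	(1UL << 1)	/* assumed bit layout */

	static atomic_ulong unwind_mask;

	static void unwind_reset_info(void)
	{
		unsigned long bits = atomic_load(&unwind_mask);

		/* Was there any unwinding? */
		if (!bits)
			return;

		do {
			/* Is a task_work going to run again before going back? */
			if (bits & UNWIND_PENDING)
				return;
		} while (!atomic_compare_exchange_weak(&unwind_mask, &bits, 0UL));
	}

	int main(void)
	{
		atomic_fetch_or(&unwind_mask, UNWIND_USED);
		unwind_reset_info();	/* no PENDING bit, so mask is cleared */
		printf("mask=%lu\n", atomic_load(&unwind_mask));
		return 0;
	}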