Message-ID: <20250729081256.3433892-2-yuzhuo@google.com>
Date: Tue, 29 Jul 2025 01:12:54 -0700
From: Yuzhuo Jing <yuzhuo@...gle.com>
To: Peter Zijlstra <peterz@...radead.org>, Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>, Namhyung Kim <namhyung@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>, Jiri Olsa <jolsa@...nel.org>,
Ian Rogers <irogers@...gle.com>, Adrian Hunter <adrian.hunter@...el.com>,
Liang Kan <kan.liang@...ux.intel.com>, Paul Walmsley <paul.walmsley@...ive.com>,
Palmer Dabbelt <palmer@...belt.com>, Albert Ou <aou@...s.berkeley.edu>,
Alexandre Ghiti <alex@...ti.fr>, Yuzhuo Jing <yzj@...ch.edu>, Yuzhuo Jing <yuzhuo@...gle.com>,
Guo Ren <guoren@...nel.org>, Andrea Parri <parri.andrea@...il.com>,
Leonardo Bras <leobras@...hat.com>, linux-kernel@...r.kernel.org,
linux-perf-users@...r.kernel.org, linux-riscv@...ts.infradead.org
Subject: [PATCH v1 1/3] tools: Import atomic_fetch_{and,add,sub}
Import the function needed for the ticket spinlock implementation
(atomic_fetch_add). In addition, import the operations that pair with
it (atomic_fetch_sub, atomic_fetch_and).
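
For context, a ticket lock's acquire path is typically built around
atomic_fetch_add(): the locker bumps the "next" half of the lock word
and gets back the old value, whose "owner" half it then spins on. The
sketch below is illustrative only; the lock layout and the
TICKET_NEXT_SHIFT/ticket_lock() names are not taken from this series:

  #include <linux/atomic.h>		/* tools/include copy */

  #define TICKET_NEXT_SHIFT 16		/* high half: next ticket, low half: owner */

  static inline void ticket_lock(atomic_t *lock)
  {
          /* take a ticket; the returned old value carries the current owner */
          unsigned int old = atomic_fetch_add(1 << TICKET_NEXT_SHIFT, lock);
          unsigned short ticket = old >> TICKET_NEXT_SHIFT;

          /* wait until the owner half catches up to our ticket */
          while ((unsigned short)atomic_read(lock) != ticket)
                  ;	/* cpu_relax() in real code */
  }
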
Signed-off-by: Yuzhuo Jing <yuzhuo@...gle.com>
---
tools/arch/x86/include/asm/atomic.h | 17 +++++++++
tools/arch/x86/include/asm/cmpxchg.h | 11 ++++++
tools/include/asm-generic/atomic-gcc.h | 51 ++++++++++++++++++++++++++
3 files changed, 79 insertions(+)
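
One note for review, separate from the diff itself: all three imports
follow the kernel's fetch_* convention of returning the value the
atomic held before the update. A standalone sketch of that contract,
using the same __sync builtins as the asm-generic fallbacks added
below:

  #include <assert.h>

  typedef struct { int counter; } atomic_t;	/* as in tools/include/linux/types.h */

  static inline int atomic_fetch_add(int i, atomic_t *v)
  {
          return __sync_fetch_and_add(&v->counter, i);
  }

  static inline int atomic_fetch_and(int i, atomic_t *v)
  {
          return __sync_fetch_and_and(&v->counter, i);
  }

  int main(void)
  {
          atomic_t v = { .counter = 3 };

          assert(atomic_fetch_add(2, &v) == 3);	/* old value returned... */
          assert(v.counter == 5);			/* ...new value stored */
          assert(atomic_fetch_and(1, &v) == 5);
          assert(v.counter == 1);			/* 5 & 1 == 1 */
          return 0;
  }
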
diff --git a/tools/arch/x86/include/asm/atomic.h b/tools/arch/x86/include/asm/atomic.h
index a55ffd4eb5f1..1fb7711ebbd7 100644
--- a/tools/arch/x86/include/asm/atomic.h
+++ b/tools/arch/x86/include/asm/atomic.h
@@ -66,6 +66,14 @@ static inline int atomic_dec_and_test(atomic_t *v)
GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
}
+static __always_inline int atomic_fetch_add(int i, atomic_t *v)
+{
+ return xadd(&v->counter, i);
+}
+#define atomic_fetch_add atomic_fetch_add
+
+#define atomic_fetch_sub(i, v) atomic_fetch_add(-(i), v)
+
static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
return cmpxchg(&v->counter, old, new);
@@ -85,6 +93,15 @@ static __always_inline int atomic_fetch_or(int i, atomic_t *v)
return val;
}
+static __always_inline int atomic_fetch_and(int i, atomic_t *v)
+{
+ int val = atomic_read(v);
+
+ do { } while (!atomic_try_cmpxchg(v, &val, val & i));
+
+ return val;
+}
+
static inline int test_and_set_bit(long nr, unsigned long *addr)
{
GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, "Ir", nr, "%0", "c");
diff --git a/tools/arch/x86/include/asm/cmpxchg.h b/tools/arch/x86/include/asm/cmpxchg.h
index 5372da8b27fc..2d89f150badf 100644
--- a/tools/arch/x86/include/asm/cmpxchg.h
+++ b/tools/arch/x86/include/asm/cmpxchg.h
@@ -12,6 +12,8 @@ extern void __xchg_wrong_size(void)
__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
__compiletime_error("Bad argument size for cmpxchg");
+extern void __xadd_wrong_size(void)
+ __compiletime_error("Bad argument size for xadd");
/*
* Constants for operation sizes. On 32-bit, the 64-bit size it set to
@@ -200,4 +202,13 @@ extern void __cmpxchg_wrong_size(void)
#define try_cmpxchg(ptr, pold, new) \
__try_cmpxchg((ptr), (pold), (new), sizeof(*(ptr)))
+/*
+ * xadd() adds "inc" to "*ptr" and atomically returns the previous
+ * value of "*ptr".
+ *
+ * xadd() is locked when multiple CPUs are online
+ */
+#define __xadd(ptr, inc, lock) __xchg_op((ptr), (inc), xadd, lock)
+#define xadd(ptr, inc) __xadd((ptr), (inc), LOCK_PREFIX)
+
#endif /* TOOLS_ASM_X86_CMPXCHG_H */
diff --git a/tools/include/asm-generic/atomic-gcc.h b/tools/include/asm-generic/atomic-gcc.h
index 08b7b3b36873..cc146b82bb34 100644
--- a/tools/include/asm-generic/atomic-gcc.h
+++ b/tools/include/asm-generic/atomic-gcc.h
@@ -100,6 +100,23 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
return likely(r == o);
}
+/**
+ * atomic_fetch_and() - atomic bitwise AND with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_and() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_and(int i, atomic_t *v)
+{
+ return __sync_fetch_and_and(&v->counter, i);
+}
+
/**
* atomic_fetch_or() - atomic bitwise OR with full ordering
* @i: int value
@@ -117,6 +134,40 @@ atomic_fetch_or(int i, atomic_t *v)
return __sync_fetch_and_or(&v->counter, i);
}
+/**
+ * atomic_fetch_add() - atomic add with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_add(int i, atomic_t *v)
+{
+ return __sync_fetch_and_add(&v->counter, i);
+}
+
+/**
+ * atomic_fetch_sub() - atomic subtract with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_sub() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_sub(int i, atomic_t *v)
+{
+ return __sync_fetch_and_sub(&v->counter, i);
+}
+
static inline int test_and_set_bit(long nr, unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
--
2.50.1.487.gc89ff58d15-goog