Message-ID: <20241113085703.148839-1-yongli-oc@zhaoxin.com>
Date: Wed, 13 Nov 2024 16:57:03 +0800
From: yongli-oc <yongli-oc@...oxin.com>
To: <akpm@...ux-foundation.org>, <linux-kernel@...r.kernel.org>
CC: <yongli@...oxin.com>, <cobechen@...oxin.com>, <louisqi@...oxin.com>,
<jiangbowang@...oxin.com>
Subject: [PATCH] Support the lockref reference count fast path when LOCK_STAT is enabled

Swap the positions of lock and count so that CMPXCHG_LOCKREF can be
used when SPINLOCK_SIZE > 4, as is the case when LOCK_STAT is enabled.
The reference count fast path can then be used regardless of the size
of spinlock_t.

Signed-off-by: yongli-oc <yongli-oc@...oxin.com>
---
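As a stand-alone illustration of the idea (not part of the patch; the
type names below are made up for the example), the layout change can be
sketched in plain C11 with GCC extensions:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a debug-enlarged spinlock_t (> 4 bytes). */
typedef struct {
	uint32_t raw_lock;	/* arch-level lock word, 0 == unlocked */
	uint32_t lockstat;	/* pretend LOCK_STAT bookkeeping */
} big_spinlock_t;

/* Old order: lock first.  The count starts at offset 8, outside any
 * 64-bit word that also contains the lock word. */
struct lockref_old {
	big_spinlock_t lock;
	int count;
};

/* Patched order: count first, lock second, packed.  Now count (offset 0)
 * and the arch lock word (offset 4) share one aligned 64-bit window, so
 * a single cmpxchg64 can update the count while checking "unlocked". */
struct lockref_new {
	union {
		uint64_t lock_count;
		struct {
			int count;
			big_spinlock_t lock;
		} __attribute__((packed));
	};
};

int main(void)
{
	/* The same invariants the patch encodes as BUILD_BUG_ON()s. */
	static_assert(offsetof(struct lockref_new, lock) == 4, "lock at +4");
	static_assert(offsetof(big_spinlock_t, raw_lock) == 0, "lock word first");

	printf("old: count at %zu (outside the first 8 bytes)\n",
	       offsetof(struct lockref_old, count));
	printf("new: count at %zu, lock word at %zu (both inside)\n",
	       offsetof(struct lockref_new, count),
	       offsetof(struct lockref_new, lock));
	return 0;
}
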
 include/linux/lockref.h | 17 ++++++++++++++++-
 lib/lockref.c           | 26 ++++++++++++++++++++++++++
 2 files changed, 42 insertions(+), 1 deletion(-)

diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index c3a1f78bc884..44ac754a029b 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -22,15 +22,30 @@
 	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
 	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
 
+#define USE_CMPXCHG_LOCKREF_ALTERNATIVE \
+	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
+	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE > 4)
+
 struct lockref {
 	union {
 #if USE_CMPXCHG_LOCKREF
 		aligned_u64 lock_count;
-#endif
 		struct {
 			spinlock_t lock;
 			int count;
 		};
+#elif USE_CMPXCHG_LOCKREF_ALTERNATIVE
+		aligned_u64 lock_count;
+		struct {
+			int count;
+			spinlock_t lock;
+		} __packed;
+#else
+		struct {
+			spinlock_t lock;
+			int count;
+		};
+#endif
 	};
 };
diff --git a/lib/lockref.c b/lib/lockref.c
index 2afe4c5d8919..e92606f66e9b 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -26,6 +26,32 @@
 	}								\
 } while (0)
 
+#elif USE_CMPXCHG_LOCKREF_ALTERNATIVE
+
+/*
+ * Note that the "cmpxchg()" reloads the "old" value for the
+ * failure case.
+ */
+#define CMPXCHG_LOOP(CODE, SUCCESS) do {				\
+	int retry = 100;						\
+	struct lockref old;						\
+	BUILD_BUG_ON(offsetof(struct lockref, lock) != 4);		\
+	BUILD_BUG_ON(offsetof(spinlock_t, rlock) != 0);			\
+	BUILD_BUG_ON(offsetof(raw_spinlock_t, raw_lock) != 0);		\
+	old.lock_count = READ_ONCE(lockref->lock_count);		\
+	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
+		struct lockref new = old;				\
+		CODE							\
+		if (likely(try_cmpxchg64_relaxed(&lockref->lock_count,	\
+						 &old.lock_count,	\
+						 new.lock_count))) {	\
+			SUCCESS;					\
+		}							\
+		if (!--retry)						\
+			break;						\
+	}								\
+} while (0)
+
 #else
 
 #define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)
--
2.34.1
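
A note for archive readers: the CMPXCHG_LOOP added above can also be
rendered as a stand-alone user-space sketch (illustrative only; the
names and union layout are made up to mirror the patch, and C11 atomics
stand in for the kernel's try_cmpxchg64_relaxed()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

union lockref_word {
	uint64_t lock_count;
	struct {
		int32_t count;		/* offset 0: the reference count */
		uint32_t raw_lock;	/* offset 4: 0 == unlocked */
	};
};

/* Returns true if the lockless fast path took the reference. */
static bool lockref_get_fast(_Atomic uint64_t *word)
{
	union lockref_word old, new;
	int retry = 100;

	old.lock_count = atomic_load_explicit(word, memory_order_relaxed);
	while (old.raw_lock == 0) {	/* arch_spin_value_unlocked() stand-in */
		new = old;
		new.count++;		/* the CODE step */
		if (atomic_compare_exchange_weak_explicit(
				word, &old.lock_count, new.lock_count,
				memory_order_relaxed, memory_order_relaxed))
			return true;	/* the SUCCESS step */
		/* on failure, "old" was reloaded by the compare-exchange */
		if (!--retry)
			break;		/* heavy contention: give up */
	}
	return false;			/* caller must take the spinlock */
}

int main(void)
{
	union lockref_word init = { .count = 1 };
	_Atomic uint64_t word;

	atomic_init(&word, init.lock_count);

	if (lockref_get_fast(&word))
		printf("fast path took a reference\n");

	init.lock_count = atomic_load(&word);
	printf("count = %d\n", init.count);	/* prints 2 */
	return 0;
}

Capping the retries at 100 mirrors the existing USE_CMPXCHG_LOCKREF
loop: under heavy contention the caller stops spinning on the
compare-exchange and falls back to taking the spinlock instead.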