[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1362717437-1729-5-git-send-email-walken@google.com>
Date: Thu, 7 Mar 2013 20:37:16 -0800
From: Michel Lespinasse <walken@...gle.com>
To: Oleg Nesterov <oleg@...hat.com>,
David Howells <dhowells@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>
Cc: torvalds@...ux-foundation.org, akpm@...ux-foundation.org,
linux-kernel@...r.kernel.org
Subject: [RFC PATCH 4/5] kernel: add ticket based fair rwlock
Simple generic implementation of a fair reader/writer spinlock using
tickets (a specialized x86 implementation could be made faster).
Signed-off-by: Michel Lespinasse <walken@...gle.com>
---
include/linux/fair_rwlock.h | 72 +++++++++++++++++++++++++++++++++++++++++++++
include/linux/lockdep.h | 15 ++++++++++
2 files changed, 87 insertions(+)
create mode 100644 include/linux/fair_rwlock.h
diff --git a/include/linux/fair_rwlock.h b/include/linux/fair_rwlock.h
new file mode 100644
index 000000000000..28c49262c4af
--- /dev/null
+++ b/include/linux/fair_rwlock.h
@@ -0,0 +1,72 @@
+#ifndef __LINUX_FAIR_RWLOCK_H
+#define __LINUX_FAIR_RWLOCK_H
+
+/*
+ * Ticket-based fair reader/writer spinlock.
+ *
+ * All three counters only ever increase and are compared for equality
+ * only, so integer wraparound is harmless.
+ */
+struct fair_rwlock {
+ atomic_t ticket; /* next ticket number to hand out */
+ atomic_t curr_r; /* ticket currently allowed to take a read lock */
+ atomic_t curr_w; /* ticket currently allowed to take the write lock */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map; /* lockdep tracking state for this lock */
+#endif
+};
+
+/*
+ * __FAIR_RWLOCK_DEP_MAP_INIT - optional lockdep initializer fragment.
+ * Expands to the dep_map member initializer (including its leading
+ * comma) under CONFIG_DEBUG_LOCK_ALLOC, and to nothing otherwise.
+ */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define __FAIR_RWLOCK_DEP_MAP_INIT(lockname) , { .name = #lockname }
+#else
+#define __FAIR_RWLOCK_DEP_MAP_INIT(lockname)
+#endif
+
+/* Static initializer for an unlocked fair_rwlock: all counters at 0. */
+#define __FAIR_RW_LOCK_UNLOCKED(name) \
+ { ATOMIC_INIT(0), ATOMIC_INIT(0), ATOMIC_INIT(0) \
+ __FAIR_RWLOCK_DEP_MAP_INIT(name) }
+
+/* Hand out the next ticket number (post-increment of the dispenser). */
+static inline int get_ticket(struct fair_rwlock *lock)
+{
+ int next = atomic_inc_return(&lock->ticket);
+
+ return next - 1;
+}
+
+/*
+ * fair_write_lock - acquire @lock for exclusive (writer) access.
+ *
+ * Draws a ticket and spins until the writer turn counter reaches it;
+ * FIFO ticket order is what makes the lock fair.  Preemption stays
+ * disabled until fair_write_unlock().
+ */
+static inline void fair_write_lock(struct fair_rwlock *lock)
+{
+ int ticket;
+
+ fair_rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+
+ preempt_disable();
+ ticket = get_ticket(lock);
+ while (atomic_read(&lock->curr_w) != ticket)
+  cpu_relax(); /* low-power, SMT-friendly busy wait */
+ smp_mb(); /* acquire: keep the critical section after the spin */
+}
+
+/*
+ * fair_write_unlock - release the exclusive (writer) lock.
+ *
+ * Bumping both turn counters hands the lock to the next ticket holder,
+ * whichever kind it is: a waiting reader sees curr_r advance, a waiting
+ * writer sees curr_w advance.
+ */
+static inline void fair_write_unlock(struct fair_rwlock *lock)
+{
+ fair_rwlock_release(&lock->dep_map, 1, _RET_IP_);
+
+ smp_mb(); /* release: critical section completes before the handoff */
+ atomic_inc(&lock->curr_r);
+ atomic_inc(&lock->curr_w);
+ preempt_enable();
+}
+
+/*
+ * fair_read_lock - acquire @lock for shared (reader) access.
+ *
+ * Spins until the reader turn counter reaches our ticket, then
+ * immediately advances curr_r so that a directly following reader may
+ * enter concurrently (readers only stack up behind writers).
+ */
+static inline void fair_read_lock(struct fair_rwlock *lock)
+{
+ int ticket;
+
+ fair_rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+
+ preempt_disable();
+ ticket = get_ticket(lock);
+ while (atomic_read(&lock->curr_r) != ticket)
+  cpu_relax(); /* low-power, SMT-friendly busy wait */
+ smp_mb(); /* acquire: keep the critical section after the spin */
+ atomic_set(&lock->curr_r, ticket + 1); /* let the next reader in */
+}
+
+/*
+ * fair_read_unlock - release a shared (reader) lock.
+ *
+ * curr_r is not touched here (the next reader was already released in
+ * fair_read_lock); bumping curr_w accounts for this reader so a waiting
+ * writer proceeds only once every earlier ticket holder has unlocked.
+ */
+static inline void fair_read_unlock(struct fair_rwlock *lock)
+{
+ fair_rwlock_release(&lock->dep_map, 1, _RET_IP_);
+
+ smp_mb(); /* order the read-side critical section before the handoff */
+ atomic_inc(&lock->curr_w);
+ preempt_enable();
+}
+
+#endif /* __LINUX_FAIR_RWLOCK_H */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index f1e877b79ed8..e2f59eadf485 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -510,6 +510,21 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
/*
 * Lockdep annotations for fair_rwlock, mirroring the mutex_acquire()
 * family below: the argument after the trylock flag distinguishes
 * writer (0) from reader (1) acquires, and the next one selects the
 * validation depth (2 with CONFIG_PROVE_LOCKING, else 1).  All hooks
 * compile away when CONFIG_DEBUG_LOCK_ALLOC is disabled.
 */
# define fair_rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
# define fair_rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i)
# else
# define fair_rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# define fair_rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define fair_rwlock_release(l, n, i) lock_release(l, n, i)
#else
# define fair_rwlock_acquire(l, s, t, i) do { } while (0)
# define fair_rwlock_acquire_read(l, s, t, i) do { } while (0)
# define fair_rwlock_release(l, n, i) do { } while (0)
#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
# else
--
1.8.1.3
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists