Message-Id: <20190801010944.174039805@linutronix.de>
Date: Thu, 01 Aug 2019 03:01:29 +0200
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
Sebastian Siewior <bigeasy@...utronix.de>,
Anna-Maria Gleixner <anna-maria@...utronix.de>,
Steven Rostedt <rostedt@...dmis.org>,
Julia Cartwright <julia@...com>, Jan Kara <jack@...e.cz>,
"Theodore Tso" <tytso@....edu>,
Alexander Viro <viro@...iv.linux.org.uk>,
Matthew Wilcox <willy@...radead.org>,
linux-fsdevel@...r.kernel.org, linux-ext4@...r.kernel.org,
Jan Kara <jack@...e.com>, Mark Fasheh <mark@...heh.com>,
Joseph Qi <joseph.qi@...ux.alibaba.com>,
Joel Becker <jlbec@...lplan.org>
Subject: [patch V2 3/7] fs/buffer: Substitute BH_Uptodate_Lock for RT and bit spinlock debugging

Bit spinlocks are problematic if PREEMPT_RT is enabled. They disable
preemption, which is undesirable for latency reasons, and they break when
regular spinlocks are taken within the bit_spinlock locked region, because
regular spinlocks are converted to 'sleeping spinlocks' on RT.

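To make the failure mode concrete, here is a minimal sketch (illustration
only, not part of this patch; 'other_lock' is a made-up stand-in for any
regular spinlock taken inside the locked region):

/*
 * Illustration only. Assumes <linux/buffer_head.h> and
 * <linux/bit_spinlock.h>. bit_spin_lock() disables preemption; on
 * PREEMPT_RT spin_lock() can sleep, so taking it inside the bit
 * spinlock region schedules while atomic.
 */
static void rt_breakage_sketch(struct buffer_head *bh, spinlock_t *other_lock)
{
	bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);	/* preemption off */
	spin_lock(other_lock);		/* sleeping lock on RT -> splat */
	spin_unlock(other_lock);
	bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
}
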
Substitute the BH_Uptodate_Lock bit spinlock with a regular spinlock for
PREEMPT_RT enabled kernels.

Bit spinlocks are also not covered by lock debugging, e.g. lockdep. With
the spinlock substitution in place, they can be exposed via
CONFIG_DEBUG_BIT_SPINLOCKS.

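As an illustration of what the debug option buys (sketch only, not part of
this patch): once the lock is a spin_lock_init()ed spinlock_t, lockdep
tracks it, so a bogus recursive acquisition like the one below is reported
instead of silently spinning forever:

/* Illustration only: lockdep flags the recursive acquisition. */
static void lockdep_sketch(struct buffer_head *bh)
{
	unsigned long flags = bh_uptodate_lock_irqsave(bh);

	(void)bh_uptodate_lock_irqsave(bh);	/* deadlock, reported by lockdep */
	bh_uptodate_unlock_irqrestore(bh, flags);
}
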
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Reviewed-by: Jan Kara <jack@...e.cz>
Cc: "Theodore Ts'o" <tytso@....edu>
Cc: Alexander Viro <viro@...iv.linux.org.uk>
Cc: Matthew Wilcox <willy@...radead.org>
Cc: linux-fsdevel@...r.kernel.org
---
V2: Collected Reviewed-by tag
---
 fs/buffer.c                 |    1 +
 include/linux/buffer_head.h |   31 +++++++++++++++++++++++++++++++
 2 files changed, 32 insertions(+)

--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3360,6 +3360,7 @@ struct buffer_head *alloc_buffer_head(gf
 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
 	if (ret) {
 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
+		buffer_head_init_locks(ret);
 		preempt_disable();
 		__this_cpu_inc(bh_accounting.nr);
 		recalc_bh_state();
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -76,8 +76,35 @@ struct buffer_head {
 	struct address_space *b_assoc_map;	/* mapping this buffer is
 						   associated with */
 	atomic_t b_count;		/* users using this buffer_head */
+
+#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_DEBUG_BIT_SPINLOCKS)
+	spinlock_t b_uptodate_lock;
+#endif
 };
 
+#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_DEBUG_BIT_SPINLOCKS)
+
+static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&bh->b_uptodate_lock, flags);
+	return flags;
+}
+
+static inline void
+bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
+{
+	spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
+}
+
+static inline void buffer_head_init_locks(struct buffer_head *bh)
+{
+	spin_lock_init(&bh->b_uptodate_lock);
+}
+
+#else /* PREEMPT_RT || DEBUG_BIT_SPINLOCKS */
+
 static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
 {
 	unsigned long flags;
@@ -94,6 +121,10 @@ bh_uptodate_unlock_irqrestore(struct buf
 	local_irq_restore(flags);
 }
 
+static inline void buffer_head_init_locks(struct buffer_head *bh) { }
+
+#endif /* !PREEMPT_RT && !DEBUG_BIT_SPINLOCKS */
+
 /*
  * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
  * and buffer_foo() functions.
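
For reference, callers were already converted to these wrappers earlier in
the series, so no caller changes are needed here. A sketch of the usage
pattern, loosely modelled on end_buffer_async_read() (illustration only):

static void caller_sketch(struct buffer_head *first, struct buffer_head *bh)
{
	unsigned long flags;

	/* Regular spinlock on RT/debug kernels, otherwise
	 * local_irq_save() + bit_spin_lock(BH_Uptodate_Lock, ...). */
	flags = bh_uptodate_lock_irqsave(first);
	clear_buffer_async_read(bh);
	/* ... inspect the other buffers attached to the page ... */
	bh_uptodate_unlock_irqrestore(first, flags);
}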