Message-Id: <20200519201912.1564477-4-bigeasy@linutronix.de>
Date: Tue, 19 May 2020 22:19:07 +0200
From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To: linux-kernel@...r.kernel.org
Cc: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
Steven Rostedt <rostedt@...dmis.org>,
Will Deacon <will@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
"Paul E . McKenney" <paulmck@...nel.org>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Lai Jiangshan <jiangshanlai@...il.com>,
Josh Triplett <josh@...htriplett.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
rcu@...r.kernel.org
Subject: [PATCH 3/8] srcu: Use local_lock() for per-CPU struct srcu_data access
SRCU disables interrupts to obtain a stable per-CPU pointer and then
acquires the spinlock embedded in the per-CPU srcu_data structure. The
release path uses spin_unlock_irqrestore_rcu_node(). While this is
correct on a non-RT kernel, it conflicts with RT semantics because the
spinlock is converted to a 'sleeping' spinlock there, and sleeping locks
obviously cannot be acquired with interrupts disabled.
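For reference, the pattern in question looks roughly like this (a
simplified sketch of the __call_srcu() enqueue path, not a literal
quote of the code):

	local_irq_save(flags);			/* stable per-CPU pointer */
	sdp = this_cpu_ptr(ssp->sda);
	spin_lock_rcu_node(sdp);		/* sleeping lock on RT */
	...
	spin_unlock_irqrestore_rcu_node(sdp, flags);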
Add a local lock and use the corresponding local lock operations. Split
the combined unlock/restore into spin_unlock_rcu_node() followed by
local_unlock_irqrestore(). The local lock operations map to
local_irq_save()/local_irq_restore() on a non-RT kernel. On an RT
kernel the local lock is substituted with a real per-CPU lock which
serializes the accesses and guarantees CPU locality while keeping the
code section preemptible. No functional change.
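With the local lock, the same sequence becomes (again a simplified
sketch, using the names introduced by this patch):

	local_lock_irqsave(sda_lock, flags);	/* non-RT: local_irq_save(),
						 * RT: per-CPU lock, preemptible */
	sdp = this_cpu_ptr(ssp->sda);
	spin_lock_rcu_node(sdp);
	...
	spin_unlock_rcu_node(sdp);
	local_unlock_irqrestore(sda_lock, flags);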
Cc: Lai Jiangshan <jiangshanlai@...il.com>
Cc: "Paul E. McKenney" <paulmck@...nel.org>
Cc: Josh Triplett <josh@...htriplett.org>
Cc: Steven Rostedt <rostedt@...dmis.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
Cc: rcu@...r.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
---
kernel/rcu/srcutree.c | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 0c71505f0e19c..8d2b5f75145d7 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -25,6 +25,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>
+#include <linux/locallock.h>
#include "rcu.h"
#include "rcu_segcblist.h"
@@ -735,6 +736,7 @@ static void srcu_flip(struct srcu_struct *ssp)
smp_mb(); /* D */ /* Pairs with C. */
}
+static DEFINE_LOCAL_LOCK(sda_lock);
/*
* If SRCU is likely idle, return true, otherwise return false.
*
@@ -765,13 +767,13 @@ static bool srcu_might_be_idle(struct srcu_struct *ssp)
unsigned long tlast;
/* If the local srcu_data structure has callbacks, not idle. */
- local_irq_save(flags);
+ local_lock_irqsave(sda_lock, flags);
sdp = this_cpu_ptr(ssp->sda);
if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
- local_irq_restore(flags);
+ local_unlock_irqrestore(sda_lock, flags);
return false; /* Callbacks already present, so not idle. */
}
- local_irq_restore(flags);
+ local_unlock_irqrestore(sda_lock, flags);
/*
* No local callbacks, so probabalistically probe global state.
@@ -851,7 +853,7 @@ static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
}
rhp->func = func;
idx = srcu_read_lock(ssp);
- local_irq_save(flags);
+ local_lock_irqsave(sda_lock, flags);
sdp = this_cpu_ptr(ssp->sda);
spin_lock_rcu_node(sdp);
rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
@@ -867,7 +869,8 @@ static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
sdp->srcu_gp_seq_needed_exp = s;
needexp = true;
}
- spin_unlock_irqrestore_rcu_node(sdp, flags);
+ spin_unlock_rcu_node(sdp);
+ local_unlock_irqrestore(sda_lock, flags);
if (needgp)
srcu_funnel_gp_start(ssp, sdp, s, do_norm);
else if (needexp)
--
2.26.2