Message-Id: <20190821231906.4224-2-swood@redhat.com>
Date: Wed, 21 Aug 2019 18:19:04 -0500
From: Scott Wood <swood@...hat.com>
To: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Cc: linux-rt-users@...r.kernel.org, linux-kernel@...r.kernel.org,
"Paul E . McKenney" <paulmck@...ux.ibm.com>,
Joel Fernandes <joel@...lfernandes.org>,
Thomas Gleixner <tglx@...utronix.de>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Clark Williams <williams@...hat.com>,
Scott Wood <swood@...hat.com>
Subject: [PATCH RT v2 1/3] rcu: Acquire RCU lock when disabling BHs

A plain local_bh_disable() is documented as creating an RCU critical
section, and (at least) rcutorture expects this to be the case. However,
in_softirq() doesn't block a grace period on PREEMPT_RT, since RCU checks
preempt_count() directly. Even if RCU were changed to check
in_softirq(), that wouldn't allow blocked BH disablers to be boosted.
Fix this by calling rcu_read_lock() from local_bh_disable(), and updating
rcu_read_lock_bh_held() accordingly.
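For background (not part of the change itself): rcu_read_lock_bh_held() is the
condition that lockdep-checked accessors test, so it has to keep matching
where the read-side critical section actually begins. Reusing the made-up
names from the sketch above, a caller that may run either under
rcu_read_lock() or with just BHs disabled typically looks like:

static int mixed_ctx_reader(void)
{
        struct foo *p;

        /* legal under rcu_read_lock() or with BHs disabled */
        p = rcu_dereference_check(gbl_ptr, rcu_read_lock_bh_held());
        return p ? p->val : 0;
}

With the change below, a BH-disabled region on RT holds rcu_lock_map via the
added rcu_read_lock(), which is why the RT variant of the predicate checks
that map (or irqs_disabled()) rather than in_softirq().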
Signed-off-by: Scott Wood <swood@...hat.com>
---
Another question is whether non-raw spinlocks are intended to create an
RCU read-side critical section due to implicit preempt disable. If they
are, then we'd need to add rcu_read_lock() there as well, since spinlocks on
RT don't disable preemption (and rcutorture should explicitly test with a
spinlock). If not, the documentation should make that clear.
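To make that question concrete, the pattern in doubt looks roughly like the
following (again a sketch using the made-up names from above, not code from
the tree):

static int reader_under_spinlock(void)
{
        struct foo *p;
        int val = 0;

        spin_lock(&foo_lock);   /* implies preempt_disable() only on !RT */
        p = rcu_dereference_check(gbl_ptr, lockdep_is_held(&foo_lock));
        if (p)
                val = p->val;
        spin_unlock(&foo_lock);
        return val;
}

On !RT the implicit preempt_disable() in spin_lock() is what keeps a
concurrent synchronize_rcu() from returning early; with RT's sleeping
spinlocks nothing does, unless an explicit rcu_read_lock() is added.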
include/linux/rcupdate.h | 4 ++++
kernel/rcu/update.c | 4 ++++
kernel/softirq.c | 12 +++++++++---
3 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 388ace315f32..d6e357378732 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -615,10 +615,12 @@ static inline void rcu_read_unlock(void)
static inline void rcu_read_lock_bh(void)
{
local_bh_disable();
+#ifndef CONFIG_PREEMPT_RT_FULL
__acquire(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_lock_bh() used illegally while idle");
+#endif
}
/*
@@ -628,10 +630,12 @@ static inline void rcu_read_lock_bh(void)
*/
static inline void rcu_read_unlock_bh(void)
{
+#ifndef CONFIG_PREEMPT_RT_FULL
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
+#endif
local_bh_enable();
}
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 016c66a98292..a9cdf3d562bc 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -296,7 +296,11 @@ int rcu_read_lock_bh_held(void)
return 0;
if (!rcu_lockdep_current_cpu_online())
return 0;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ return lock_is_held(&rcu_lock_map) || irqs_disabled();
+#else
return in_softirq() || irqs_disabled();
+#endif
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d16d080a74f7..6080c9328df1 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -115,8 +115,10 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
long soft_cnt;
WARN_ON_ONCE(in_irq());
- if (!in_atomic())
+ if (!in_atomic()) {
local_lock(bh_lock);
+ rcu_read_lock();
+ }
soft_cnt = this_cpu_inc_return(softirq_counter);
WARN_ON_ONCE(soft_cnt == 0);
current->softirq_count += SOFTIRQ_DISABLE_OFFSET;
@@ -151,8 +153,10 @@ void _local_bh_enable(void)
#endif
current->softirq_count -= SOFTIRQ_DISABLE_OFFSET;
- if (!in_atomic())
+ if (!in_atomic()) {
+ rcu_read_unlock();
local_unlock(bh_lock);
+ }
}
void _local_bh_enable_rt(void)
@@ -185,8 +189,10 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
WARN_ON_ONCE(count < 0);
local_irq_enable();
- if (!in_atomic())
+ if (!in_atomic()) {
+ rcu_read_unlock();
local_unlock(bh_lock);
+ }
current->softirq_count -= SOFTIRQ_DISABLE_OFFSET;
preempt_check_resched();
--
1.8.3.1