Message-Id: <20190802151501.13069-13-paulmck@linux.ibm.com>
Date: Fri, 2 Aug 2019 08:15:00 -0700
From: "Paul E. McKenney" <paulmck@...ux.ibm.com>
To: rcu@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, mingo@...nel.org,
	jiangshanlai@...il.com, dipankar@...ibm.com,
	akpm@...ux-foundation.org, mathieu.desnoyers@...icios.com,
	josh@...htriplett.org, tglx@...utronix.de, peterz@...radead.org,
	rostedt@...dmis.org, dhowells@...hat.com, edumazet@...gle.com,
	fweisbec@...il.com, oleg@...hat.com, joel@...lfernandes.org,
	"Paul E. McKenney" <paulmck@...ux.ibm.com>
Subject: [PATCH RFC tip/core/rcu 13/14] rcutorture: Force on tick for readers and callback flooders

Readers and callback flooders in the rcutorture stress-test suite run for
extended time periods by design. They do take pains to relinquish the
CPU from time to time, but in some cases this relies on the scheduler
being active, which in turn relies on the scheduling-clock interrupt
firing from time to time.
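
As a rough sketch (for illustration only: tight_loop_kthread() and
do_tight_work() are made-up stand-ins, and the tick_dep_*_task() usage
mirrors the diff below, the RCU tick dependency itself being added
earlier in this series), the pattern looks like this:

	#include <linux/kernel.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/tick.h>

	/* Hypothetical tight-loop kthread body. */
	static int tight_loop_kthread(void *arg)
	{
		/* Tell NO_HZ_FULL to keep the tick firing for this task. */
		if (IS_ENABLED(CONFIG_NO_HZ_FULL))
			tick_dep_set_task(current, TICK_DEP_MASK_RCU);
		while (!kthread_should_stop()) {
			do_tight_work(); /* long-running, rarely sleeps */
			cond_resched();  /* without the tick, the scheduler
					  * may never request a resched here */
		}
		/* Drop the dependency so the CPU can go tickless again. */
		if (IS_ENABLED(CONFIG_NO_HZ_FULL))
			tick_dep_clear_task(current, TICK_DEP_MASK_RCU);
		return 0;
	}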

This commit therefore forces scheduling-clock interrupts within
these loops. While in the area, this commit also prevents
rcu_torture_reader()'s occasional timed sleeps from delaying shutdown.
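
The shutdown-delay change follows one rule: check torture_must_stop()
before any timed sleep, so a reader cannot doze for up to a second after
shutdown has already been requested. In sketch form (one_read_failed is
a stand-in for the failure case that triggers the long sleep):

	do {
		/* ... one reader pass ... */
		/* Skip the recovery sleep if shutdown was requested. */
		if (one_read_failed && !torture_must_stop())
			schedule_timeout_interruptible(HZ);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_reader");
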
Signed-off-by: Paul E. McKenney <paulmck@...ux.ibm.com>
---
 kernel/rcu/rcutorture.c | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 3c9feca1eab1..bf08aa783ecc 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -44,6 +44,7 @@
 #include <linux/sched/debug.h>
 #include <linux/sched/sysctl.h>
 #include <linux/oom.h>
+#include <linux/tick.h>
 
 #include "rcu.h"
 
@@ -1363,15 +1364,16 @@ rcu_torture_reader(void *arg)
 	set_user_nice(current, MAX_NICE);
 	if (irqreader && cur_ops->irq_capable)
 		timer_setup_on_stack(&t, rcu_torture_timer, 0);
-
+	if (IS_ENABLED(CONFIG_NO_HZ_FULL))
+		tick_dep_set_task(current, TICK_DEP_MASK_RCU);
 	do {
 		if (irqreader && cur_ops->irq_capable) {
 			if (!timer_pending(&t))
 				mod_timer(&t, jiffies + 1);
 		}
-		if (!rcu_torture_one_read(&rand))
+		if (!rcu_torture_one_read(&rand) && !torture_must_stop())
 			schedule_timeout_interruptible(HZ);
-		if (time_after(jiffies, lastsleep)) {
+		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
 			schedule_timeout_interruptible(1);
 			lastsleep = jiffies + 10;
 		}
@@ -1383,6 +1385,8 @@ rcu_torture_reader(void *arg)
 		del_timer_sync(&t);
 		destroy_timer_on_stack(&t);
 	}
+	if (IS_ENABLED(CONFIG_NO_HZ_FULL))
+		tick_dep_clear_task(current, TICK_DEP_MASK_RCU);
 	torture_kthread_stopping("rcu_torture_reader");
 	return 0;
 }
@@ -1729,10 +1733,10 @@ static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
 		// Real call_rcu() floods hit userspace, so emulate that.
 		if (need_resched() || (iter & 0xfff))
 			schedule();
-	} else {
-		// No userspace emulation: CB invocation throttles call_rcu()
-		cond_resched();
+		return;
 	}
+	// No userspace emulation: CB invocation throttles call_rcu()
+	cond_resched();
 }
 
 /*
@@ -1781,6 +1785,8 @@ static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
 		init_rcu_head_on_stack(&fcs.rh);
 		selfpropcb = true;
 	}
+	if (IS_ENABLED(CONFIG_NO_HZ_FULL))
+		tick_dep_set_task(current, TICK_DEP_MASK_RCU);
 
 	/* Tight loop containing cond_resched(). */
 	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
@@ -1826,6 +1832,8 @@ static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
 		destroy_rcu_head_on_stack(&fcs.rh);
 	}
 	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
+	if (IS_ENABLED(CONFIG_NO_HZ_FULL))
+		tick_dep_clear_task(current, TICK_DEP_MASK_RCU);
 	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
 }
 
@@ -1865,6 +1873,8 @@ static void rcu_torture_fwd_prog_cr(void)
 	cver = READ_ONCE(rcu_torture_current_version);
 	gps = cur_ops->get_gp_seq();
 	rcu_launder_gp_seq_start = gps;
+	if (IS_ENABLED(CONFIG_NO_HZ_FULL))
+		tick_dep_set_task(current, TICK_DEP_MASK_RCU);
 	while (time_before(jiffies, stopat) &&
 	       !shutdown_time_arrived() &&
 	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
@@ -1911,6 +1921,8 @@
 		rcu_torture_fwd_cb_hist();
 	}
 	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
+	if (IS_ENABLED(CONFIG_NO_HZ_FULL))
+		tick_dep_clear_task(current, TICK_DEP_MASK_RCU);
 	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
 }
 
--
2.17.1