Message-ID: <20240516190437.3545310-3-costa.shul@redhat.com>
Date: Thu, 16 May 2024 22:04:32 +0300
From: Costa Shulyupin <costa.shul@...hat.com>
To: longman@...hat.com,
pauld@...hat.com,
juri.lelli@...hat.com,
prarit@...hat.com,
vschneid@...hat.com,
Anna-Maria Behnsen <anna-maria@...utronix.de>,
Frederic Weisbecker <frederic@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Zefan Li <lizefan.x@...edance.com>,
Tejun Heo <tj@...nel.org>,
Johannes Weiner <hannes@...xchg.org>,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Petr Mladek <pmladek@...e.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Masahiro Yamada <masahiroy@...nel.org>,
Randy Dunlap <rdunlap@...radead.org>,
Yoann Congal <yoann.congal@...le.fr>,
"Gustavo A. R. Silva" <gustavoars@...nel.org>,
Nhat Pham <nphamcs@...il.com>,
Costa Shulyupin <costa.shul@...hat.com>,
linux-kernel@...r.kernel.org,
cgroups@...r.kernel.org
Subject: [PATCH v1 2/7] sched/isolation: Adjust affinity of timers according to change of housekeeping cpumask
Adjust the affinity of timers and watchdog_cpumask according to
runtime changes of housekeeping.cpumasks[HK_TYPE_TIMER].

watchdog_cpumask is initialized during boot in lockup_detector_init()
from housekeeping_cpumask(HK_TYPE_TIMER).
lockup_detector_reconfigure() then picks up the updated watchdog_cpumask
via __lockup_detector_reconfigure().
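
For reference, the boot-time initialization mentioned above boils down to
roughly the following (a simplified sketch of lockup_detector_init() in
kernel/watchdog.c; exact details vary between kernel versions):

	/* Simplified sketch: the watchdog initially runs only on HK_TYPE_TIMER CPUs. */
	void __init lockup_detector_init(void)
	{
		if (tick_nohz_full_enabled())
			pr_info("Disabling watchdog on nohz_full cores by default\n");

		cpumask_copy(&watchdog_cpumask,
			     housekeeping_cpumask(HK_TYPE_TIMER));

		lockup_detector_setup();
	}
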
timers_resettle_from_cpu() is modeled directly on timers_dead_cpu().
local_irq_disable() is used because cpuhp_thread_fun() disables interrupts
before calling cpuhp_invoke_callback().
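
That call path looks approximately like this (abridged from cpuhp_thread_fun()
in kernel/cpu.c; the exact code differs between kernel versions):

	/* Abridged: atomic hotplug states are invoked with interrupts disabled. */
	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}
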
Core test snippets without infrastructure:
1. Create a timer on a specific cpu with:

	timer_setup(&test_timer, test_timer_cb, TIMER_PINNED);
	test_timer.expires = KTIME_MAX;
	add_timer_on(&test_timer, test_cpu);

2. Call housekeeping_update() (see the sketch after this list).

3. Ensure that there are no timers left on the specified cpu at the end
of timers_resettle_from_cpu() with:

	/* Count all pending timers queued on @cpu, across all bases and wheel levels. */
	static int count_timers(int cpu)
	{
		struct timer_base *base;
		int b, v, count = 0;

		for (b = 0; b < NR_BASES; b++) {
			base = per_cpu_ptr(&timer_bases[b], cpu);
			raw_spin_lock_irq(&base->lock);
			for (v = 0; v < WHEEL_SIZE; v++) {
				struct hlist_node *c;

				hlist_for_each(c, base->vectors + v)
					count++;
			}
			raw_spin_unlock_irq(&base->lock);
		}
		return count;
	}
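
A minimal sketch of step 2, assuming the test code can reach housekeeping_update()
(it is static in kernel/sched/isolation.c, so a real test would need a wrapper or
hook); test_resettle(), test_cpu and the mask handling below are hypothetical test
scaffolding, not part of this patch:

	/* Hypothetical test helper: drop test_cpu from the HK_TYPE_TIMER housekeeping mask. */
	static int test_resettle(int test_cpu)
	{
		cpumask_var_t new_mask;
		int err;

		if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
			return -ENOMEM;

		cpumask_copy(new_mask, housekeeping_cpumask(HK_TYPE_TIMER));
		cpumask_clear_cpu(test_cpu, new_mask);

		/* Assumed reachable from the test; normally static in isolation.c. */
		err = housekeeping_update(HK_TYPE_TIMER, new_mask);

		free_cpumask_var(new_mask);
		return err;
	}
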
Signed-off-by: Costa Shulyupin <costa.shul@...hat.com>
---
include/linux/timer.h | 2 ++
init/Kconfig | 1 +
kernel/sched/isolation.c | 28 ++++++++++++++++++++++++++++
kernel/time/timer.c | 42 ++++++++++++++++++++++++++++++++++++++++
4 files changed, 73 insertions(+)
diff --git a/include/linux/timer.h b/include/linux/timer.h
index e67ecd1cbc97d..a09266abdb18a 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -219,9 +219,11 @@ unsigned long round_jiffies_up_relative(unsigned long j);
#ifdef CONFIG_HOTPLUG_CPU
int timers_prepare_cpu(unsigned int cpu);
int timers_dead_cpu(unsigned int cpu);
+void timers_resettle_from_cpu(unsigned int cpu);
#else
#define timers_prepare_cpu NULL
#define timers_dead_cpu NULL
+static inline void timers_resettle_from_cpu(unsigned int cpu) { }
#endif
#endif
diff --git a/init/Kconfig b/init/Kconfig
index 72404c1f21577..fac49c6bb965a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -682,6 +682,7 @@ config CPU_ISOLATION
bool "CPU isolation"
depends on SMP || COMPILE_TEST
default y
+ select HOTPLUG_CPU
help
Make sure that CPUs running critical tasks are not disturbed by
any source of "noise" such as unbound workqueues, timers, kthreads...
diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index 036e48f0e7d1b..3b63f0212887e 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -8,6 +8,10 @@
*
*/
+#ifdef CONFIG_LOCKUP_DETECTOR
+#include <linux/nmi.h>
+#endif
+
enum hk_flags {
	HK_FLAG_TIMER = BIT(HK_TYPE_TIMER),
	HK_FLAG_RCU = BIT(HK_TYPE_RCU),
@@ -116,6 +120,19 @@ static void __init housekeeping_setup_type(enum hk_type type,
		     housekeeping_staging);
}

+static void resettle_all_timers(cpumask_var_t enable_mask, cpumask_var_t disable_mask)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, enable_mask) {
+		timers_prepare_cpu(cpu);
+	}
+
+	for_each_cpu(cpu, disable_mask) {
+		timers_resettle_from_cpu(cpu);
+	}
+}
+
/*
 * housekeeping_update - change housekeeping.cpumasks[type] and propagate the
 * change.
@@ -144,6 +161,17 @@ static int housekeeping_update(enum hk_type type, cpumask_var_t update)
	if (!static_branch_unlikely(&housekeeping_overridden))
		static_key_enable_cpuslocked(&housekeeping_overridden.key);

+	switch (type) {
+	case HK_TYPE_TIMER:
+		resettle_all_timers(&masks->enable, &masks->disable);
+#ifdef CONFIG_LOCKUP_DETECTOR
+		cpumask_copy(&watchdog_cpumask, housekeeping_cpumask(HK_TYPE_TIMER));
+		lockup_detector_reconfigure();
+#endif
+		break;
+	default:
+		break;
+	}
	kfree(masks);

	return 0;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 48288dd4a102f..2d15c0e7b0550 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -51,6 +51,7 @@
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>
+#include <linux/sched/isolation.h>
#include "tick-internal.h"
#include "timer_migration.h"
@@ -2657,6 +2658,47 @@ int timers_prepare_cpu(unsigned int cpu)
	return 0;
}
+/**
+ * timers_resettle_from_cpu - resettle timers from the specified CPU to housekeeping CPUs
+ * @cpu: CPU whose timers are to be migrated away
+ */
+void timers_resettle_from_cpu(unsigned int cpu)
+{
+	struct timer_base *old_base;
+	struct timer_base *new_base;
+	int b, i;
+
+	local_irq_disable();
+	for (b = 0; b < NR_BASES; b++) {
+		old_base = per_cpu_ptr(&timer_bases[b], cpu);
+		new_base = per_cpu_ptr(&timer_bases[b],
+				       cpumask_any_and(cpu_active_mask,
+						       housekeeping_cpumask(HK_TYPE_TIMER)));
+		/*
+		 * The caller is globally serialized and nobody else
+		 * takes two locks at once, deadlock is not possible.
+		 */
+		raw_spin_lock_irq(&new_base->lock);
+		raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+
+		/*
+		 * The current CPUs base clock might be stale. Update it
+		 * before moving the timers over.
+		 */
+		forward_timer_base(new_base);
+
+		WARN_ON_ONCE(old_base->running_timer);
+		old_base->running_timer = NULL;
+
+		for (i = 0; i < WHEEL_SIZE; i++)
+			migrate_timer_list(new_base, old_base->vectors + i);
+
+		raw_spin_unlock(&old_base->lock);
+		raw_spin_unlock_irq(&new_base->lock);
+	}
+	local_irq_enable();
+}
+
int timers_dead_cpu(unsigned int cpu)
{
	struct timer_base *old_base;
--
2.45.0