Message-ID: <20250506091534.42117-12-gmonaco@redhat.com>
Date: Tue, 6 May 2025 11:15:40 +0200
From: Gabriele Monaco <gmonaco@...hat.com>
To: linux-kernel@...r.kernel.org,
Frederic Weisbecker <frederic@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Waiman Long <longman@...hat.com>
Cc: Gabriele Monaco <gmonaco@...hat.com>
Subject: [PATCH v4 5/5] timers: Exclude isolated cpus from timer migration
The timer migration mechanism allows active CPUs to pull timers from
idle ones to improve the overall idle time. This is however undesired
when CPU-intensive workloads run on isolated cores, as the algorithm
would move the timers from housekeeping to isolated cores, negatively
affecting the isolation.
This effect was noticed on a 128-core machine running oslat on the
isolated cores (1-31,33-63,65-95,97-127). The tool monopolises the
CPUs, and the CPU with the lowest number in each timer migration
hierarchy group (here 1 and 65) appears always active and continuously
pulls global timers from the housekeeping CPUs. This ends up moving
driver work (e.g. delayed work) to isolated CPUs and causes latency
spikes:
before the change:
# oslat -c 1-31,33-63,65-95,97-127 -D 62s
...
Maximum: 1203 10 3 4 ... 5 (us)
after the change:
# oslat -c 1-31,33-63,65-95,97-127 -D 62s
...
Maximum: 10 4 3 4 3 ... 5 (us)
Exclude isolated cores from the timer migration algorithm by extending
the concept of unavailable cores, currently used for offline ones, to
isolated ones:
* A core is unavailable if isolated or offline;
* A core is available if neither isolated nor offline.
A core is considered isolated, and hence unavailable, if it:
* is in the isolcpus list
* is in the nohz_full list
* is in an isolated cpuset
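These three conditions are what cpu_is_isolated() checks, which is the
helper this patch relies on. A simplified sketch of it (paraphrasing
include/linux/sched/isolation.h; the exact mainline wording may differ):

static inline bool cpu_is_isolated(int cpu)
{
	/* isolcpus=: CPU excluded from domain housekeeping */
	return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN) ||
	       /* nohz_full=: CPU excluded from tick housekeeping */
	       !housekeeping_test_cpu(cpu, HK_TYPE_TICK) ||
	       /* CPU sits in an isolated cpuset partition */
	       cpuset_cpu_is_isolated(cpu);
}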
Due to how the timer migration algorithm works, any CPU that is part of
the hierarchy can have its global timers pulled by remote CPUs and has
to pull remote timers itself; skipping only the pulling of remote
timers would break the logic.
For this reason, prevent isolated CPUs from pulling remote global
timers, but also the other way around: any global timer started on an
isolated CPU will run there. This does not break the concept of
isolation (global timers don't come from outside the CPU) and, if
considered inappropriate, can usually be mitigated with other isolation
techniques (e.g. IRQ pinning).
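To illustrate the resulting behaviour, a hypothetical snippet (not part
of this patch; names are made up for the example, and add_timer_global()
is the v6.9+ API for arming a timer without TIMER_PINNED). A global
timer armed while running on an isolated CPU now expires on that CPU,
since no remote CPU can pull it:

#include <linux/smp.h>
#include <linux/timer.h>

static struct timer_list example_timer;	/* hypothetical example timer */

static void example_timer_fn(struct timer_list *t)
{
	/*
	 * With this patch, if example_timer was armed on an isolated
	 * CPU, this callback also runs there: the CPU is outside the
	 * timer migration hierarchy, so housekeeping CPUs cannot pull
	 * the timer.
	 */
	pr_info("timer expired on CPU %d\n", smp_processor_id());
}

static void arm_example_timer(void)
{
	timer_setup(&example_timer, example_timer_fn, 0);
	example_timer.expires = jiffies + HZ;
	/* No TIMER_PINNED: this is a global timer, normally migratable. */
	add_timer_global(&example_timer);
}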
Signed-off-by: Gabriele Monaco <gmonaco@...hat.com>
---
include/linux/timer.h | 6 ++++++
kernel/cgroup/cpuset.c | 2 ++
kernel/time/timer_migration.c | 31 ++++++++++++++++++++++++++++---
3 files changed, 36 insertions(+), 3 deletions(-)
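For context (not part of the diff below): the available/unavailable
callbacks are reached via CPU hotplug. A rough sketch of the
registration, assuming the callback renaming from the earlier patches
in this series and mainline's CPUHP_AP_TMIGR_ONLINE hotplug state:

#include <linux/cpuhotplug.h>

static int __init tmigr_register_hotplug_sketch(void)
{
	/*
	 * Because tmigr_cpu_available() now checks cpu_is_isolated(),
	 * CPUs isolated at boot (isolcpus=, nohz_full=) are filtered
	 * out the first time they come online through this path.
	 */
	return cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
				 tmigr_cpu_available,	/* startup */
				 tmigr_cpu_unavailable);	/* teardown */
}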
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 10596d7c3a34..4722e075d984 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -190,4 +190,10 @@ int timers_dead_cpu(unsigned int cpu);
#define timers_dead_cpu NULL
#endif
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+extern void tmigr_isolated_exclude_cpumask(cpumask_var_t exclude_cpumask);
+#else
+static inline void tmigr_isolated_exclude_cpumask(cpumask_var_t exclude_cpumask) { }
+#endif
+
#endif
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 95316d39c282..866b4b818811 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1334,6 +1334,8 @@ static void update_exclusion_cpumasks(bool isolcpus_updated)
ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
WARN_ON_ONCE(ret < 0);
+
+ tmigr_isolated_exclude_cpumask(isolated_cpus);
}
/**
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 25439f961ccf..e4b394d78a8d 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <linux/timerqueue.h>
#include <trace/events/ipi.h>
+#include <linux/sched/isolation.h>
#include "timer_migration.h"
#include "tick-internal.h"
@@ -1445,7 +1446,7 @@ static long tmigr_trigger_active(void *unused)
static int tmigr_cpu_unavailable(unsigned int cpu)
{
- struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+ struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
int migrator;
u64 firstexp;
@@ -1472,15 +1473,24 @@ static int tmigr_cpu_unavailable(unsigned int cpu)
static int tmigr_cpu_available(unsigned int cpu)
{
- struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
+ struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
/* Check whether CPU data was successfully initialized */
if (WARN_ON_ONCE(!tmc->tmgroup))
return -EINVAL;
+ /*
+ * Isolated CPUs don't participate in timer migration.
+ * Checking here guarantees that CPUs isolated at boot (e.g. isolcpus)
+ * are not marked as available when they first become online.
+ * During runtime, any offline isolated CPU is also not incorrectly
+ * marked as available once it gets back online.
+ */
+ if (cpu_is_isolated(cpu))
+ return 0;
raw_spin_lock_irq(&tmc->lock);
trace_tmigr_cpu_available(tmc);
- tmc->idle = timer_base_is_idle();
+ tmc->idle = timer_base_remote_is_idle(cpu);
if (!tmc->idle)
__tmigr_cpu_activate(tmc);
tmc->available = true;
@@ -1489,6 +1499,21 @@ static int tmigr_cpu_available(unsigned int cpu)
return 0;
}
+void tmigr_isolated_exclude_cpumask(cpumask_var_t exclude_cpumask)
+{
+ int cpu;
+
+ lockdep_assert_cpus_held();
+
+ for_each_cpu_and(cpu, exclude_cpumask, tmigr_available_cpumask)
+ tmigr_cpu_unavailable(cpu);
+
+ for_each_cpu_andnot(cpu, cpu_online_mask, exclude_cpumask) {
+ if (!cpumask_test_cpu(cpu, tmigr_available_cpumask))
+ tmigr_cpu_available(cpu);
+ }
+}
+
static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
int node)
{
--
2.49.0