Message-ID: <20250508145319.97794-14-gmonaco@redhat.com>
Date: Thu, 8 May 2025 16:53:26 +0200
From: Gabriele Monaco <gmonaco@...hat.com>
To: linux-kernel@...r.kernel.org,
Frederic Weisbecker <frederic@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Waiman Long <longman@...hat.com>
Cc: Gabriele Monaco <gmonaco@...hat.com>
Subject: [PATCH v5 6/6] timers: Exclude isolated cpus from timer migration

The timer migration mechanism allows active CPUs to pull timers from
idle ones to improve the overall idle time. This is, however, undesired
when CPU-intensive workloads run on isolated cores, as the algorithm
would move timers from housekeeping to isolated cores, negatively
affecting the isolation.

This effect was noticed on a 128-core machine running oslat on the
isolated cores (1-31,33-63,65-95,97-127). The tool monopolises the
CPUs, and the CPU with the lowest number in each timer migration
hierarchy group (here 1 and 65) appears always active and continuously
pulls global timers from the housekeeping CPUs. This ends up moving
driver work (e.g. delayed work) to isolated CPUs and causes latency
spikes:

before the change:

  # oslat -c 1-31,33-63,65-95,97-127 -D 62s
  ...
  Maximum: 1203 10 3 4 ... 5 (us)

after the change:

  # oslat -c 1-31,33-63,65-95,97-127 -D 62s
  ...
  Maximum: 10 4 3 4 3 ... 5 (us)

Exclude isolated cores from the timer migration algorithm by extending
the concept of unavailable cores, currently used for offline ones, to
isolated ones:
* A core is unavailable if isolated or offline;
* A core is available if neither isolated nor offline.

A core is considered isolated, and therefore unavailable even while
online, if any of the following holds (see the sketch after this list):
* it is in the isolcpus list
* it is in the nohz_full list
* it is in an isolated cpuset
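
The rule above boils down to a small predicate. A rough sketch follows
(not part of the patch, which open-codes the check directly in
tmigr_cpu_available(); the helper name here is made up, while the two
kernel APIs are the ones used in the diff below):

    #include <linux/cpuset.h>
    #include <linux/sched/isolation.h>

    /*
     * Hypothetical helper mirroring the rule above: a CPU may take part
     * in timer migration only if it is a housekeeping CPU for domain
     * isolation and is not part of an isolated cpuset.
     */
    static bool tmigr_cpu_may_participate(unsigned int cpu)
    {
            return housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN) &&
                   !cpuset_cpu_is_isolated(cpu);
    }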

Due to how the timer migration algorithm works, any CPU that is part of
the hierarchy can have its global timers pulled by remote CPUs and, in
turn, has to pull remote timers itself; skipping only the pulling of
remote timers would break the logic.

For this reason, prevent isolated CPUs from pulling remote global
timers, but also the other way around: any global timer started on an
isolated CPU will run there. This does not break the concept of
isolation (global timers don't come from outside the CPU) and, if
considered inappropriate, can usually be mitigated with other isolation
techniques (e.g. IRQ pinning).
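
To illustrate the last point, a hedged sketch (hypothetical driver code
using the standard timer API; only the expiry placement described above
is specific to this change):

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    static struct timer_list example_timer;     /* made-up example timer */

    static void example_timer_fn(struct timer_list *t)
    {
            /*
             * If example_timer was armed on an isolated CPU, it now also
             * expires there: that CPU is outside the migration hierarchy,
             * so no remote CPU pulls this global timer anymore.
             */
    }

    static void arm_example_timer(void)
    {
            /* No TIMER_PINNED flag: this is a "global" timer. */
            timer_setup(&example_timer, example_timer_fn, 0);
            mod_timer(&example_timer, jiffies + HZ);    /* fires in ~1s */
    }
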
Signed-off-by: Gabriele Monaco <gmonaco@...hat.com>
---
 include/linux/timer.h         |  9 ++++++++
 kernel/cgroup/cpuset.c        |  3 +++
 kernel/time/timer_migration.c | 43 +++++++++++++++++++++++++++++++++++
 3 files changed, 55 insertions(+)
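
Note, not part of the patch: a hypothetical caller sketch. The new
helper asserts that the CPU hotplug lock is held, a context which the
cpuset call site in the diff below already provides:

    #include <linux/bug.h>
    #include <linux/cpu.h>
    #include <linux/timer.h>

    static void example_update_isolation(cpumask_var_t isolated_cpus)
    {
            int ret;

            /* tmigr_isolated_exclude_cpumask() calls lockdep_assert_cpus_held() */
            cpus_read_lock();
            ret = tmigr_isolated_exclude_cpumask(isolated_cpus);
            WARN_ON_ONCE(ret < 0);
            cpus_read_unlock();
    }
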
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 10596d7c3a34..a8b683d9ce25 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -190,4 +190,13 @@ int timers_dead_cpu(unsigned int cpu);
 #define timers_dead_cpu NULL
 #endif
 
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+extern int tmigr_isolated_exclude_cpumask(cpumask_var_t exclude_cpumask);
+#else
+static inline int tmigr_isolated_exclude_cpumask(cpumask_var_t exclude_cpumask)
+{
+        return 0;
+}
+#endif
+
 #endif
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 2f1df6f5b988..6e36e333d8b1 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1375,6 +1375,9 @@ static void update_exclusion_cpumasks(bool isolcpus_updated)
 
         ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
         WARN_ON_ONCE(ret < 0);
+
+        ret = tmigr_isolated_exclude_cpumask(isolated_cpus);
+        WARN_ON_ONCE(ret < 0);
 }
 
 /**
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 25439f961ccf..fb27e929e2cf 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -10,6 +10,7 @@
 #include <linux/spinlock.h>
 #include <linux/timerqueue.h>
 #include <trace/events/ipi.h>
+#include <linux/sched/isolation.h>
 
 #include "timer_migration.h"
 #include "tick-internal.h"
@@ -1478,6 +1479,16 @@ static int tmigr_cpu_available(unsigned int cpu)
         if (WARN_ON_ONCE(!tmc->tmgroup))
                 return -EINVAL;
 
+        /*
+         * Domain isolated CPUs don't participate in timer migration.
+         * Checking here guarantees that CPUs isolated at boot (e.g. isolcpus)
+         * are not marked as available when they first become online.
+         * During runtime, any offline isolated CPU is also not incorrectly
+         * marked as available once it gets back online.
+         */
+        if (!housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN) ||
+            cpuset_cpu_is_isolated(cpu))
+                return 0;
         raw_spin_lock_irq(&tmc->lock);
         trace_tmigr_cpu_available(tmc);
         tmc->idle = timer_base_is_idle();
@@ -1489,6 +1500,38 @@ static int tmigr_cpu_available(unsigned int cpu)
         return 0;
 }
 
+static void tmigr_remote_cpu_unavailable(void *ignored)
+{
+        tmigr_cpu_unavailable(smp_processor_id());
+}
+
+static void tmigr_remote_cpu_available(void *ignored)
+{
+        tmigr_cpu_available(smp_processor_id());
+}
+
+int tmigr_isolated_exclude_cpumask(cpumask_var_t exclude_cpumask)
+{
+        cpumask_var_t cpumask;
+        int ret = 0;
+
+        lockdep_assert_cpus_held();
+
+        if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+                return -ENOMEM;
+
+        cpumask_and(cpumask, exclude_cpumask, tmigr_available_cpumask);
+        cpumask_and(cpumask, cpumask, housekeeping_cpumask(HK_TYPE_TICK));
+        on_each_cpu_mask(cpumask, tmigr_remote_cpu_unavailable, NULL, 0);
+
+        cpumask_andnot(cpumask, cpu_online_mask, exclude_cpumask);
+        cpumask_andnot(cpumask, cpumask, tmigr_available_cpumask);
+        on_each_cpu_mask(cpumask, tmigr_remote_cpu_available, NULL, 0);
+
+        free_cpumask_var(cpumask);
+        return ret;
+}
+
 static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
                              int node)
 {
--
2.49.0