Message-ID: <20260122161647.142704-1-realwujing@gmail.com>
Date: Thu, 22 Jan 2026 11:16:46 -0500
From: Qiliang Yuan <realwujing@...il.com>
To: mingo@...hat.com,
peterz@...radead.org,
juri.lelli@...hat.com,
vincent.guittot@...aro.org
Cc: dietmar.eggemann@....com,
rostedt@...dmis.org,
bsegall@...gle.com,
mgorman@...e.de,
vschneid@...hat.com,
linux-kernel@...r.kernel.org,
yuanql9@...natelecom.cn,
Qiliang Yuan <realwujing@...il.com>
Subject: [PATCH] sched/fair: Cache NUMA node statistics to avoid O(N) scanning

Optimize update_numa_stats() by reusing the per-group statistics that the
load balancer already computes in update_sg_lb_stats(). When those cached
statistics are fresh (updated within the last 10ms), the per-CPU scan of
the target node can be skipped entirely, cutting the hot-path cost of
NUMA balancing from O(CPUs_per_node) to O(1).

Signed-off-by: Qiliang Yuan <realwujing@...il.com>
Signed-off-by: Qiliang Yuan <yuanql9@...natelecom.cn>
---
 kernel/sched/fair.c  | 35 +++++++++++++++++++++++++++++++++++
 kernel/sched/sched.h |  7 +++++++
 2 files changed, 42 insertions(+)
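
For review purposes, here is a minimal user-space sketch of the
timestamp-gated cache pattern this patch applies: one path refreshes the
statistics and publishes a timestamp last, and a reader takes an O(1)
fast path while the cache is fresh, falling back to a full rescan when
it is stale. All names below (node_stats, stats_refresh,
stats_try_fast_path, STATS_TTL_NS) are illustrative only and do not
exist in the kernel tree:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* 10ms freshness window, mirroring msecs_to_jiffies(10) in the patch. */
#define STATS_TTL_NS (10ULL * 1000 * 1000)

struct node_stats {
        uint64_t load;          /* cached statistic */
        uint64_t updated_ns;    /* timestamp of the last refresh */
};

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Writer side: refresh the statistic, then publish the timestamp last. */
static void stats_refresh(struct node_stats *ns, uint64_t load)
{
        ns->load = load;
        ns->updated_ns = now_ns();
}

/* Reader side: O(1) hit while fresh; the caller rescans on a miss. */
static bool stats_try_fast_path(const struct node_stats *ns, uint64_t *load)
{
        if (now_ns() - ns->updated_ns < STATS_TTL_NS) {
                *load = ns->load;       /* cache hit: skip the O(N) scan */
                return true;
        }
        return false;                   /* stale: fall back to a full scan */
}

int main(void)
{
        struct node_stats ns = { 0 };
        uint64_t load;

        stats_refresh(&ns, 42);
        if (stats_try_fast_path(&ns, &load))
                printf("fast path hit: load=%llu\n", (unsigned long long)load);
        return 0;
}
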
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e71302282671..dc46262bd227 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2099,11 +2099,36 @@ static void update_numa_stats(struct task_numa_env *env,
 			      bool find_idle)
 {
 	int cpu, idle_core = -1;
+	struct sched_domain *sd;
+	struct sched_group *sg;
 
 	memset(ns, 0, sizeof(*ns));
 	ns->idle_cpu = -1;
 
 	rcu_read_lock();
+	/* Fast path: reuse stats cached by the load balancer instead of an O(N) scan. */
+	sd = rcu_dereference(per_cpu(sd_numa, env->src_cpu));
+	if (sd && !find_idle) {
+		sg = sd->groups;
+		do {
+			/* Is this the group that spans the node we are interested in? */
+			if (cpumask_test_cpu(cpumask_first(cpumask_of_node(nid)), sched_group_span(sg))) {
+				/* Use the cached stats if they are fresh enough (within 10ms). */
+				if (time_before(jiffies, READ_ONCE(sg->sgc->stats_update) + msecs_to_jiffies(10))) {
+					ns->load = sg->sgc->load;
+					ns->runnable = sg->sgc->runnable;
+					ns->util = sg->sgc->util;
+					ns->nr_running = sg->sgc->nr_running;
+					ns->compute_capacity = sg->sgc->capacity;
+					rcu_read_unlock();
+					goto skip_scan;
+				}
+				break;
+			}
+			sg = sg->next;
+		} while (sg != sd->groups);
+	}
+
 	for_each_cpu(cpu, cpumask_of_node(nid)) {
 		struct rq *rq = cpu_rq(cpu);
 
@@ -2126,6 +2151,7 @@ static void update_numa_stats(struct task_numa_env *env,
 	}
 	rcu_read_unlock();
 
+skip_scan:
 	ns->weight = cpumask_weight(cpumask_of_node(nid));
 
 	ns->node_type = numa_classify(env->imbalance_pct, ns);
@@ -10488,6 +10514,15 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	if (sgs->group_type == group_overloaded)
 		sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
 				sgs->group_capacity;
+
+	/* Cache group stats for O(1) NUMA lookups; publish the timestamp last. */
+	if (env->sd->flags & SD_NUMA) {
+		group->sgc->nr_running = sgs->sum_h_nr_running;
+		group->sgc->load = sgs->group_load;
+		group->sgc->util = sgs->group_util;
+		group->sgc->runnable = sgs->group_runnable;
+		WRITE_ONCE(group->sgc->stats_update, jiffies);
+	}
 }
 
 /**
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d30cca6870f5..81160790993e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2105,6 +2105,13 @@ struct sched_group_capacity {
 
 	int id;
 
+	/* Cached group stats for O(1) NUMA lookups */
+	unsigned long nr_running;
+	unsigned long load;
+	unsigned long util;
+	unsigned long runnable;
+	unsigned long stats_update;	/* jiffies of last refresh */
+
 	unsigned long cpumask[];	/* Balance mask */
 };
 
--
2.51.0