Message-Id: <ea12146a9f431b0a6f3ff30b0197c3f4a1d807f1.1770760558.git.tim.c.chen@linux.intel.com>
Date: Tue, 10 Feb 2026 14:18:59 -0800
From: Tim Chen <tim.c.chen@...ux.intel.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
K Prateek Nayak <kprateek.nayak@....com>,
"Gautham R . Shenoy" <gautham.shenoy@....com>,
Vincent Guittot <vincent.guittot@...aro.org>
Cc: Chen Yu <yu.c.chen@...el.com>,
Juri Lelli <juri.lelli@...hat.com>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Valentin Schneider <vschneid@...hat.com>,
Madadi Vineeth Reddy <vineethr@...ux.ibm.com>,
Hillf Danton <hdanton@...a.com>,
Shrikanth Hegde <sshegde@...ux.ibm.com>,
Jianyong Wu <jianyong.wu@...look.com>,
Yangyu Chen <cyy@...self.name>,
Tingyin Duan <tingyin.duan@...il.com>,
Vern Hao <vernhao@...cent.com>,
Vern Hao <haoxing990@...il.com>,
Len Brown <len.brown@...el.com>,
Tim Chen <tim.c.chen@...ux.intel.com>,
Aubrey Li <aubrey.li@...el.com>,
Zhao Liu <zhao1.liu@...el.com>,
Chen Yu <yu.chen.surf@...il.com>,
Adam Li <adamli@...amperecomputing.com>,
Aaron Lu <ziqianlu@...edance.com>,
Tim Chen <tim.c.chen@...el.com>,
Josh Don <joshdon@...gle.com>,
Gavin Guo <gavinguo@...lia.com>,
Qais Yousef <qyousef@...alina.io>,
Libo Chen <libchen@...estorage.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH v3 19/21] sched/cache: Add user control to adjust the aggressiveness of cache-aware scheduling
From: Chen Yu <yu.c.chen@...el.com>
Introduce a set of debugfs knobs to control how aggressively
cache-aware scheduling aggregates tasks.
(1) llc_aggr_tolerance
With sched_cache enabled, the scheduler uses a process's RSS as a
proxy for its LLC footprint to determine if aggregating tasks on the
preferred LLC could cause cache contention. If RSS exceeds the LLC
size, aggregation is skipped. Some workloads with large RSS but small
actual memory footprints may still benefit from aggregation. Since
the kernel cannot efficiently track per-task cache usage (resctrl is
user-space only), userspace can provide a more accurate hint.
Introduce /sys/kernel/debug/sched/llc_aggr_tolerance to let
users control how strictly RSS limits aggregation. Meaningful values
range from 0 to 100:
- 0: Cache-aware scheduling is disabled.
- 1: Strict; processes whose RSS exceeds the LLC size are not aggregated.
- >=100: Aggressive; tasks are aggregated regardless of RSS.
For example, with a 32MB L3 cache:
- llc_aggr_tolerance=1  -> processes with RSS > 32MB are skipped.
- llc_aggr_tolerance=99 -> processes with RSS > 784GB are skipped
  (784GB = (1 + (99 - 1) * 256) * 32MB).
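To make the mapping concrete, here is a minimal userspace sketch
(illustrative only, not part of this patch; effective_rss_limit() is a
made-up helper) that reproduces the scaling applied by
get_sched_cache_scale(256) in the code below:

  #include <stdio.h>

  /*
   * Effective RSS limit (in bytes) implied by llc_aggr_tolerance for
   * an LLC of llc_bytes. 0 means cache-aware scheduling is disabled,
   * ~0ULL means RSS is ignored.
   */
  static unsigned long long effective_rss_limit(unsigned int tolerance,
                                                unsigned long long llc_bytes)
  {
          if (!tolerance)
                  return 0;
          if (tolerance >= 100)
                  return ~0ULL;
          return (1ULL + (tolerance - 1) * 256ULL) * llc_bytes;
  }

  int main(void)
  {
          unsigned long long llc = 32ULL << 20;   /* 32MB L3 */

          /* tolerance=1 -> 32MB, tolerance=99 -> ~784GB */
          printf("%llu\n", effective_rss_limit(1, llc));
          printf("%llu\n", effective_rss_limit(99, llc));
          return 0;
  }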
Similarly, /sys/kernel/debug/sched/llc_aggr_tolerance also controls
how strictly the number of active threads is weighed during cache-aware
load balancing. The SMT sibling count is taken into account as well:
a high SMT count reduces the effective aggregation capacity, which
prevents excessive task aggregation on SMT-heavy systems such as
Power10/Power11.
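A similar sketch for the thread-count side (again illustrative
userspace code rather than the kernel implementation;
would_exceed_llc_nr() is a made-up name and fits_capacity() here
approximates the scheduler's ~80% fitness margin):

  #include <stdbool.h>
  #include <stdio.h>

  static bool fits_capacity(unsigned long cap, unsigned long max)
  {
          return cap * 1280 < max * 1024; /* cap must stay below ~80% of max */
  }

  /*
   * The process is considered to exceed the LLC when its average number
   * of running threads, scaled by the SMT sibling count, no longer fits
   * within tolerance * (number of CPUs in the LLC).
   */
  static bool would_exceed_llc_nr(unsigned long nr_running_avg,
                                  unsigned int smt_nr,
                                  unsigned int llc_cpus,
                                  unsigned int tolerance)
  {
          if (!tolerance)
                  return true;    /* cache-aware scheduling disabled */
          if (tolerance >= 100)
                  return false;   /* thread-count check ignored */

          return !fits_capacity(nr_running_avg * smt_nr,
                                (unsigned long)tolerance * llc_cpus);
  }

  int main(void)
  {
          /* 12 running threads, SMT2, 16 CPUs in the LLC */
          printf("%d\n", would_exceed_llc_nr(12, 2, 16, 1)); /* 1: exceeds */
          printf("%d\n", would_exceed_llc_nr(12, 2, 16, 2)); /* 0: fits   */
          return 0;
  }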
Yangyu suggested introducing separate aggregation controls for the
active-thread check and the RSS check. Since per-process/task-group
controls are planned, such fine-grained tunables are deferred to that
implementation.
(2) llc_epoch_period, llc_epoch_affinity_timeout, llc_imb_pct and
llc_overaggr_pct are also exposed as tunables under
/sys/kernel/debug/sched/.
Suggested-by: K Prateek Nayak <kprateek.nayak@....com>
Suggested-by: Madadi Vineeth Reddy <vineethr@...ux.ibm.com>
Suggested-by: Shrikanth Hegde <sshegde@...ux.ibm.com>
Suggested-by: Tingyin Duan <tingyin.duan@...il.com>
Suggested-by: Jianyong Wu <jianyong.wu@...look.com>
Suggested-by: Yangyu Chen <cyy@...self.name>
Co-developed-by: Tim Chen <tim.c.chen@...ux.intel.com>
Signed-off-by: Tim Chen <tim.c.chen@...ux.intel.com>
Signed-off-by: Chen Yu <yu.c.chen@...el.com>
---
Notes:
v2->v3:
Simplify the implementation by using debugfs_create_u32() for all
tunable parameters.
kernel/sched/debug.c | 10 ++++++++
kernel/sched/fair.c | 59 ++++++++++++++++++++++++++++++++++++++------
kernel/sched/sched.h | 5 ++++
3 files changed, 67 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index bae747eddc59..dc4b7de6569f 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -566,6 +566,16 @@ static __init int sched_init_debug(void)
#ifdef CONFIG_SCHED_CACHE
debugfs_create_file("llc_enabled", 0644, debugfs_sched, NULL,
&sched_cache_enable_fops);
+ debugfs_create_u32("llc_aggr_tolerance", 0644, debugfs_sched,
+ &llc_aggr_tolerance);
+ debugfs_create_u32("llc_epoch_period", 0644, debugfs_sched,
+ &llc_epoch_period);
+ debugfs_create_u32("llc_epoch_affinity_timeout", 0644, debugfs_sched,
+ &llc_epoch_affinity_timeout);
+ debugfs_create_u32("llc_overaggr_pct", 0644, debugfs_sched,
+ &llc_overaggr_pct);
+ debugfs_create_u32("llc_imb_pct", 0644, debugfs_sched,
+ &llc_imb_pct);
#endif
debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ee4982af2bdd..da4291ace24c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1191,6 +1191,12 @@ static void set_next_buddy(struct sched_entity *se);
#define EPOCH_PERIOD (HZ / 100) /* 10 ms */
#define EPOCH_LLC_AFFINITY_TIMEOUT 5 /* 50 ms */
+__read_mostly unsigned int llc_aggr_tolerance = 1;
+__read_mostly unsigned int llc_epoch_period = EPOCH_PERIOD;
+__read_mostly unsigned int llc_epoch_affinity_timeout = EPOCH_LLC_AFFINITY_TIMEOUT;
+__read_mostly unsigned int llc_imb_pct = 20;
+__read_mostly unsigned int llc_overaggr_pct = 50;
+
static int llc_id(int cpu)
{
if (cpu < 0)
@@ -1223,10 +1229,22 @@ static inline bool valid_llc_buf(struct sched_domain *sd,
return valid_llc_id(id);
}
+static inline int get_sched_cache_scale(int mul)
+{
+ if (!llc_aggr_tolerance)
+ return 0;
+
+ if (llc_aggr_tolerance >= 100)
+ return INT_MAX;
+
+ return (1 + (llc_aggr_tolerance - 1) * mul);
+}
+
static bool exceed_llc_capacity(struct mm_struct *mm, int cpu)
{
struct cacheinfo *ci;
u64 rss, llc;
+ int scale;
/*
* get_cpu_cacheinfo_level() can not be used
@@ -1251,20 +1269,47 @@ static bool exceed_llc_capacity(struct mm_struct *mm, int cpu)
rss = get_mm_counter(mm, MM_ANONPAGES) +
get_mm_counter(mm, MM_SHMEMPAGES);
- return (llc <= (rss * PAGE_SIZE));
+ /*
+ * Scale the LLC size by (1 + (llc_aggr_tolerance - 1) * 256)
+ * and compare it to the task's RSS size.
+ *
+ * Suppose the L3 size is 32MB. If the
+ * llc_aggr_tolerance is 1:
+ * When the RSS is larger than 32MB, the process
+ * is regarded as exceeding the LLC capacity. If
+ * the llc_aggr_tolerance is 99:
+ * When the RSS is larger than 784GB, the process
+ * is regarded as exceeding the LLC capacity:
+ * 784GB = (1 + (99 - 1) * 256) * 32MB
+ * If the llc_aggr_tolerance is 100:
+ * ignore the RSS.
+ */
+ scale = get_sched_cache_scale(256);
+ if (scale == INT_MAX)
+ return false;
+
+ return ((llc * scale) <= (rss * PAGE_SIZE));
}
static bool exceed_llc_nr(struct mm_struct *mm, int cpu)
{
- int smt_nr = 1;
+ int smt_nr = 1, scale;
#ifdef CONFIG_SCHED_SMT
if (sched_smt_active())
smt_nr = cpumask_weight(cpu_smt_mask(cpu));
#endif
+ /*
+ * Scale the number of 'cores' in an LLC by llc_aggr_tolerance
+ * and compare it to the process's number of active threads.
+ */
+ scale = get_sched_cache_scale(1);
+ if (scale == INT_MAX)
+ return false;
+
return !fits_capacity((mm->sc_stat.nr_running_avg * smt_nr),
- per_cpu(sd_llc_size, cpu));
+ (scale * per_cpu(sd_llc_size, cpu)));
}
static void account_llc_enqueue(struct rq *rq, struct task_struct *p)
@@ -1365,7 +1410,7 @@ static inline void __update_mm_sched(struct rq *rq,
long delta = now - rq->cpu_epoch_next;
if (delta > 0) {
- n = (delta + EPOCH_PERIOD - 1) / EPOCH_PERIOD;
+ n = (delta + llc_epoch_period - 1) / llc_epoch_period;
rq->cpu_epoch += n;
rq->cpu_epoch_next += n * EPOCH_PERIOD;
__shr_u64(&rq->cpu_runtime, n);
@@ -1460,7 +1505,7 @@ void account_mm_sched(struct rq *rq, struct task_struct *p, s64 delta_exec)
* has only 1 thread, invalidate its preferred state.
*/
if (time_after(epoch,
- READ_ONCE(mm->sc_stat.epoch) + EPOCH_LLC_AFFINITY_TIMEOUT) ||
+ READ_ONCE(mm->sc_stat.epoch) + llc_epoch_affinity_timeout) ||
get_nr_threads(p) <= 1 ||
exceed_llc_nr(mm, cpu_of(rq)) ||
exceed_llc_capacity(mm, cpu_of(rq))) {
@@ -9920,7 +9965,7 @@ static inline int task_is_ineligible_on_dst_cpu(struct task_struct *p, int dest_
* (default: ~50%)
*/
#define fits_llc_capacity(util, max) \
- ((util) * 2 < (max))
+ ((util) * 100 < (max) * llc_overaggr_pct)
/*
* The margin used when comparing utilization.
@@ -9930,7 +9975,7 @@ static inline int task_is_ineligible_on_dst_cpu(struct task_struct *p, int dest_
*/
/* Allows dst util to be bigger than src util by up to bias percent */
#define util_greater(util1, util2) \
- ((util1) * 100 > (util2) * 120)
+ ((util1) * 100 > (util2) * (100 + llc_imb_pct))
/* Called from load balancing paths with rcu_read_lock held */
static __maybe_unused bool get_llc_stats(int cpu, unsigned long *util,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index adf3428745dd..f4785f84b1f1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3919,6 +3919,11 @@ static inline void mm_cid_switch_to(struct task_struct *prev, struct task_struct
DECLARE_STATIC_KEY_FALSE(sched_cache_present);
DECLARE_STATIC_KEY_FALSE(sched_cache_active);
extern int max_llcs, sysctl_sched_cache_user;
+extern unsigned int llc_aggr_tolerance;
+extern unsigned int llc_epoch_period;
+extern unsigned int llc_epoch_affinity_timeout;
+extern unsigned int llc_imb_pct;
+extern unsigned int llc_overaggr_pct;
static inline bool sched_cache_enabled(void)
{
--
2.32.0