Message-Id: <7371f30196b317c0c5a0ae3fa463ec76a4dc69ef.1750268218.git.tim.c.chen@linux.intel.com>
Date: Wed, 18 Jun 2025 11:27:53 -0700
From: Tim Chen <tim.c.chen@...ux.intel.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
K Prateek Nayak <kprateek.nayak@....com>,
"Gautham R . Shenoy" <gautham.shenoy@....com>
Cc: Tim Chen <tim.c.chen@...ux.intel.com>,
Juri Lelli <juri.lelli@...hat.com>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Valentin Schneider <vschneid@...hat.com>,
Tim Chen <tim.c.chen@...el.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Libo Chen <libo.chen@...cle.com>,
Abel Wu <wuyun.abel@...edance.com>,
Madadi Vineeth Reddy <vineethr@...ux.ibm.com>,
Hillf Danton <hdanton@...a.com>,
Len Brown <len.brown@...el.com>,
linux-kernel@...r.kernel.org,
Chen Yu <yu.c.chen@...el.com>
Subject: [RFC patch v3 05/20] sched: Add hysteresis to switch a task's preferred LLC
Switching a process's preferred LLC generates many task migrations
across LLCs. To avoid frequent switches of the home LLC, implement
the following policy:
1. Require a 2x change in average occupancy to switch the preferred LLC (sketched below)
2. Don't discard a task's preferred LLC when its occupancy becomes small
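For illustration, here is a minimal userspace sketch of the hysteresis
rule (editor's sketch: struct llc_stat, pick_preferred_llc() and the
occupancy numbers below are hypothetical, not part of this patch):

#include <stdio.h>

/*
 * Pick the preferred LLC with hysteresis: switch away from the
 * current preferred LLC only when another LLC's average occupancy
 * more than doubles it.
 */
struct llc_stat {
	int id;			/* LLC id */
	unsigned long avg_occ;	/* average cache occupancy */
};

static int pick_preferred_llc(const struct llc_stat *llcs, int nr, int cur)
{
	unsigned long max_occ = 0, cur_occ = 0;
	int max_id = cur, i;

	for (i = 0; i < nr; i++) {
		if (llcs[i].avg_occ > max_occ) {
			max_occ = llcs[i].avg_occ;
			max_id = llcs[i].id;
		}
		if (llcs[i].id == cur)
			cur_occ = llcs[i].avg_occ;
	}

	/* Hysteresis: require a 2x occupancy advantage to switch. */
	if (max_occ > 2 * cur_occ)
		return max_id;

	return cur;	/* otherwise keep the current preferred LLC */
}

int main(void)
{
	struct llc_stat llcs[] = { { 0, 300 }, { 1, 500 } };

	/* 500 <= 2 * 300: LLC 0 is kept although LLC 1 is busier. */
	printf("preferred: %d\n", pick_preferred_llc(llcs, 2, 0));

	llcs[1].avg_occ = 700;
	/* 700 > 2 * 300: now switch to LLC 1. */
	printf("preferred: %d\n", pick_preferred_llc(llcs, 2, 0));
	return 0;
}

As in the patch, a task that has no preferred LLC yet (cur_occ == 0)
adopts the busiest LLC immediately, since any nonzero occupancy
exceeds 2 * 0.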
Signed-off-by: Tim Chen <tim.c.chen@...ux.intel.com>
---
kernel/sched/fair.c | 24 ++++++++++++++++--------
1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6a2678f9d44a..7fb2322c5d9e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1175,6 +1175,14 @@ static s64 update_curr_se(struct rq *rq, struct sched_entity *curr)
#define EPOCH_PERIOD (HZ/100) /* 10 ms */
#define EPOCH_OLD 5 /* 50 ms */
+static int llc_id(int cpu)
+{
+ if (cpu < 0)
+ return -1;
+
+ return per_cpu(sd_llc_id, cpu);
+}
+
void mm_init_sched(struct mm_struct *mm, struct mm_sched __percpu *_pcpu_sched)
{
unsigned long epoch;
@@ -1299,6 +1307,7 @@ static void task_cache_work(struct callback_head *work)
struct task_struct *p = current;
struct mm_struct *mm = p->mm;
unsigned long m_a_occ = 0;
+ unsigned long last_m_a_occ = 0;
int cpu, m_a_cpu = -1;
cpumask_var_t cpus;
@@ -1337,11 +1346,13 @@ static void task_cache_work(struct callback_head *work)
per_cpu(sd_llc_id, i), occ, m_occ, m_cpu, nr);
}
- a_occ /= nr;
+ // a_occ /= nr;
if (a_occ > m_a_occ) {
m_a_occ = a_occ;
m_a_cpu = m_cpu;
}
+ if (llc_id(cpu) == llc_id(mm->mm_sched_cpu))
+ last_m_a_occ = a_occ;
trace_printk("(%d) a_occ: %ld m_a_occ: %ld\n",
per_cpu(sd_llc_id, cpu), a_occ, m_a_occ);
@@ -1355,13 +1366,10 @@ static void task_cache_work(struct callback_head *work)
}
}
- /*
- * If the max average cache occupancy is 'small' we don't care.
- */
- if (m_a_occ < (NICE_0_LOAD >> EPOCH_OLD))
- m_a_cpu = -1;
-
- mm->mm_sched_cpu = m_a_cpu;
+ if (m_a_occ > (2 * last_m_a_occ)) {
+ /* avoid the bouncing of mm_sched_cpu */
+ mm->mm_sched_cpu = m_a_cpu;
+ }
free_cpumask_var(cpus);
}
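Note on the last hunk: the old absolute cutoff (m_a_occ below
NICE_0_LOAD >> EPOCH_OLD reset mm_sched_cpu to -1) is removed, so a
task never loses its preferred LLC once one is assigned; it only
moves when another LLC's average occupancy more than doubles that of
the current home LLC. While mm_sched_cpu is still -1, llc_id()
returns -1, last_m_a_occ stays 0, and the first LLC with nonzero
occupancy is adopted right away.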
--
2.32.0