Message-Id: <20251217095912.GB2073381@bytedance.com>
Date: Wed, 17 Dec 2025 17:59:12 +0800
From: "Aaron Lu" <ziqianlu@...edance.com>
To: "Chen Yu" <yu.c.chen@...el.com>, "Tim Chen" <tim.c.chen@...ux.intel.com>
Cc: "Peter Zijlstra" <peterz@...radead.org>, 
	"Ingo Molnar" <mingo@...hat.com>, 
	"K Prateek Nayak" <kprateek.nayak@....com>, 
	"Gautham R . Shenoy" <gautham.shenoy@....com>, 
	"Vincent Guittot" <vincent.guittot@...aro.org>, 
	"Juri Lelli" <juri.lelli@...hat.com>, 
	"Dietmar Eggemann" <dietmar.eggemann@....com>, 
	"Steven Rostedt" <rostedt@...dmis.org>, 
	"Ben Segall" <bsegall@...gle.com>, "Mel Gorman" <mgorman@...e.de>, 
	"Valentin Schneider" <vschneid@...hat.com>, 
	"Madadi Vineeth Reddy" <vineethr@...ux.ibm.com>, 
	"Hillf Danton" <hdanton@...a.com>, 
	"Shrikanth Hegde" <sshegde@...ux.ibm.com>, 
	"Jianyong Wu" <jianyong.wu@...look.com>, 
	"Yangyu Chen" <cyy@...self.name>, 
	"Tingyin Duan" <tingyin.duan@...il.com>, 
	"Vern Hao" <vernhao@...cent.com>, "Vern Hao" <haoxing990@...il.com>, 
	"Len Brown" <len.brown@...el.com>, "Aubrey Li" <aubrey.li@...el.com>, 
	"Zhao Liu" <zhao1.liu@...el.com>, "Chen Yu" <yu.chen.surf@...il.com>, 
	"Adam Li" <adamli@...amperecomputing.com>, 
	"Tim Chen" <tim.c.chen@...el.com>, <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v2 23/23] -- DO NOT APPLY!!! -- sched/cache/debug: Display the per LLC occupancy for each process via proc fs

On Wed, Dec 03, 2025 at 03:07:42PM -0800, Tim Chen wrote:
> From: Chen Yu <yu.c.chen@...el.com>
> 
> Debug patch only.
> 
> Show the per-LLC occupancy in /proc/{PID}/schedstat, with each column
> corresponding to one LLC. This can be used to verify if the cache-aware
> load balancer works as expected by aggregating threads onto dedicated LLCs.
> 
> Suppose there are 2 LLCs and the sampling duration is 10 seconds:
> 
> With cache-aware load balancing enabled:
> 0 12281  <--- LLC0 residency delta is 0, LLC1 is 12 seconds
> 0 18881
> 0 16217
> 
> With cache-aware load balancing disabled:
> 6497 15802
> 9299 5435
> 17811 8278
> 
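
FWIW, here is roughly how such deltas can be sampled -- an untested
sketch that assumes the per-LLC values land on their own (second) line
of /proc/<pid>/schedstat, as in the hunk below, and reads them twice
10 seconds apart:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define MAX_LLCS 64

/* Read the per-LLC runtimes (assumed to be on the second line of
 * /proc/<pid>/schedstat) into vals[]; returns the number of columns. */
static int read_llc_runtimes(const char *path, unsigned long long *vals)
{
	char buf[4096];
	FILE *f = fopen(path, "r");
	int n = 0;

	if (!f)
		return -1;

	/* first line is the existing three-field schedstat output */
	if (!fgets(buf, sizeof(buf), f) || !fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return -1;
	}
	fclose(f);

	for (char *p = strtok(buf, " \n"); p && n < MAX_LLCS;
	     p = strtok(NULL, " \n"))
		vals[n++] = strtoull(p, NULL, 10);

	return n;
}

int main(int argc, char **argv)
{
	unsigned long long a[MAX_LLCS], b[MAX_LLCS];
	char path[64];
	int i, n;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}

	snprintf(path, sizeof(path), "/proc/%s/schedstat", argv[1]);

	n = read_llc_runtimes(path, a);
	if (n <= 0)
		return 1;

	sleep(10);	/* the 10s sampling window from the example above */

	if (read_llc_runtimes(path, b) != n)
		return 1;

	/* one residency delta per LLC, same layout as the example above */
	for (i = 0; i < n; i++)
		printf("%llu ", b[i] - a[i]);
	printf("\n");

	return 0;
}
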
> Signed-off-by: Chen Yu <yu.c.chen@...el.com>
> Signed-off-by: Tim Chen <tim.c.chen@...ux.intel.com>
> ---
>  fs/proc/base.c           | 22 ++++++++++++++++++++++
>  include/linux/mm_types.h | 19 +++++++++++++++++--
>  include/linux/sched.h    |  3 +++
>  kernel/sched/fair.c      | 40 ++++++++++++++++++++++++++++++++++++++--
>  4 files changed, 80 insertions(+), 4 deletions(-)
> 
> diff --git a/fs/proc/base.c b/fs/proc/base.c
> index 6299878e3d97..f4be96f4bd01 100644
> --- a/fs/proc/base.c
> +++ b/fs/proc/base.c
> @@ -518,6 +518,28 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
>  		   (unsigned long long)task->se.sum_exec_runtime,
>  		   (unsigned long long)task->sched_info.run_delay,
>  		   task->sched_info.pcount);
> +#ifdef CONFIG_SCHED_CACHE
> +	if (sched_cache_enabled()) {
> +		struct mm_struct *mm = task->mm;
> +		u64 *llc_runtime;
> +
> +		if (!mm)
> +			return 0;
> +
> +		llc_runtime = kcalloc(max_llcs, sizeof(u64), GFP_KERNEL);
> +		if (!llc_runtime)
> +			return 0;
> +
> +		if (get_mm_per_llc_runtime(task, llc_runtime))
> +			goto out;
> +
> +		for (int i = 0; i < max_llcs; i++)
> +			seq_printf(m, "%llu ", llc_runtime[i]);

I feel it would be better to also mark the current preferred LLC of this
process, so that I can see how well it works.
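E.g. print something like:

  0 *12281

with '*' marking the preferred LLC -- the debug diff at the end of this
mail does that.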

> +		seq_puts(m, "\n");
> +out:
> +		kfree(llc_runtime);
> +	}
> +#endif
>  
>  	return 0;
>  }

BTW, is there a way to tell whether a process is being taken care of by
'cache aware scheduling', or whether it is excluded due to its huge RSS
or having too many threads?

I used the debug code below to get this info through schedstat, but maybe
I missed something and there is a simpler method?

diff --git a/fs/proc/base.c b/fs/proc/base.c
index f4be96f4bd015..c709a1a1bd867 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -505,6 +505,7 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
 #endif
 
 #ifdef CONFIG_SCHED_INFO
+DECLARE_PER_CPU(int, sd_llc_id);
 /*
  * Provides /proc/PID/schedstat
  */
@@ -522,6 +523,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
 	if (sched_cache_enabled()) {
 		struct mm_struct *mm = task->mm;
 		u64 *llc_runtime;
+		int mm_sched_llc;
 
 		if (!mm)
 			return 0;
@@ -533,8 +535,17 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
 		if (get_mm_per_llc_runtime(task, llc_runtime))
 			goto out;
 
+		if (mm->mm_sched_cpu == -1)
+			mm_sched_llc = -1;
+		else
+			mm_sched_llc = per_cpu(sd_llc_id, mm->mm_sched_cpu);
+
+		seq_printf(m, "%llu 0x%x\n", mm->nr_running_avg, mm->mm_sched_flags);
 		for (int i = 0; i < max_llcs; i++)
-			seq_printf(m, "%llu ", llc_runtime[i]);
+			seq_printf(m, "%s%s%llu ",
+				   i == task->preferred_llc ? "*" : "",
+				   i == mm_sched_llc ? "?" : "",
+				   llc_runtime[i]);
 		seq_puts(m, "\n");
 out:
 		kfree(llc_runtime);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 255c22be7312f..06bb106d1b724 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1048,6 +1048,7 @@ struct mm_struct {
 		raw_spinlock_t mm_sched_lock;
 		unsigned long mm_sched_epoch;
 		int mm_sched_cpu;
+		int mm_sched_flags;
 		u64 nr_running_avg ____cacheline_aligned_in_smp;
 #endif
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 205208f061bb3..ab1cdba65d389 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1237,12 +1237,20 @@ static inline int get_sched_cache_scale(int mul)
 	return (1 + (llc_aggr_tolerance - 1) * mul);
 }
 
+#define MM_SCHED_EXCEED_LLC_CAPACITY	1
+#define MM_SCHED_NO_CACHE_INFO		2
+#define MM_SCHED_EXCEED_LLC_NR		4
+#define MM_SCHED_NR_THREADS		8
+
 static bool exceed_llc_capacity(struct mm_struct *mm, int cpu)
 {
 	unsigned int llc, scale;
 	struct cacheinfo *ci;
 	unsigned long rss;
 
+	mm->mm_sched_flags &= ~MM_SCHED_NO_CACHE_INFO;
+	mm->mm_sched_flags &= ~MM_SCHED_EXCEED_LLC_CAPACITY;
+
 	/*
 	 * get_cpu_cacheinfo_level() can not be used
 	 * because it requires the cpu_hotplug_lock
@@ -1257,8 +1265,10 @@ static bool exceed_llc_capacity(struct mm_struct *mm, int cpu)
 		 * L2 becomes the LLC.
 		 */
 		ci = _get_cpu_cacheinfo_level(cpu, 2);
-		if (!ci)
+		if (!ci) {
+			mm->mm_sched_flags |= MM_SCHED_NO_CACHE_INFO;
 			return true;
+		}
 	}
 
 	llc = ci->size;
@@ -1283,13 +1293,20 @@ static bool exceed_llc_capacity(struct mm_struct *mm, int cpu)
 	if (scale == INT_MAX)
 		return false;
 
-	return ((llc * scale) <= (rss * PAGE_SIZE));
+	if ((llc * scale) <= (rss * PAGE_SIZE)) {
+		mm->mm_sched_flags |= MM_SCHED_EXCEED_LLC_CAPACITY;
+		return true;
+	}
+
+	return false;
 }
 
 static bool exceed_llc_nr(struct mm_struct *mm, int cpu)
 {
 	int smt_nr = 1, scale;
 
+	mm->mm_sched_flags &= ~MM_SCHED_EXCEED_LLC_NR;
+
 #ifdef CONFIG_SCHED_SMT
 	if (sched_smt_active())
 		smt_nr = cpumask_weight(cpu_smt_mask(cpu));
@@ -1313,7 +1330,12 @@ static bool exceed_llc_nr(struct mm_struct *mm, int cpu)
 	if (scale == INT_MAX)
 		return false;
 
-	return ((mm->nr_running_avg * smt_nr) > (scale * per_cpu(sd_llc_size, cpu)));
+	if ((mm->nr_running_avg * smt_nr) > (scale * per_cpu(sd_llc_size, cpu))) {
+		mm->mm_sched_flags |= MM_SCHED_EXCEED_LLC_NR;
+		return true;
+	}
+
+	return false;
 }
 
 static void account_llc_enqueue(struct rq *rq, struct task_struct *p)
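
With the above applied, the extra schedstat output reads something like
(made-up numbers):

	6 0x0
	*?12281 0

i.e. nr_running_avg is 6, no MM_SCHED_* flag is set, and LLC0 is both
the task's preferred LLC ('*') and the LLC of mm->mm_sched_cpu ('?').
A non-zero flag word (0x1 exceeded LLC capacity, 0x2 no cache info,
0x4 exceeded LLC nr) then tells why the process is not being aggregated.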
