Message-ID: <20250417191543.1781862-3-libo.chen@oracle.com>
Date: Thu, 17 Apr 2025 12:15:43 -0700
From: Libo Chen <libo.chen@...cle.com>
To: peterz@...radead.org, mgorman@...e.de, mingo@...hat.com,
juri.lelli@...hat.com, vincent.guittot@...aro.org, tj@...nel.org,
akpm@...ux-foundation.org, rostedt@...dmis.org, llong@...hat.com
Cc: kprateek.nayak@....com, raghavendra.kt@....com, yu.c.chen@...el.com,
tim.c.chen@...el.com, vineethr@...ux.ibm.com, chris.hyser@...cle.com,
daniel.m.jordan@...cle.com, lorenzo.stoakes@...cle.com,
mkoutny@...e.com, Dhaval.Giani@....com, cgroups@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v3 2/2] sched/numa: Add tracepoint that tracks the skipping of NUMA balancing due to cpuset memory pinning

Unlike the sched_skip_vma_numa tracepoint, which tracks skipped VMAs,
this one tracks tasks subjected to cpuset.mems pinning and prints out
their allowed memory node mask. The mask is copied into the ring
buffer at event time rather than recorded as a pointer, because
TP_printk() runs when the buffer is read, by which point the task and
its mems_allowed may no longer exist.
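
With the event enabled, the trace output looks roughly like the
following (task name and numbers are made up for illustration):

  stress-1234  [002] .....  100.000000: sched_skip_cpuset_numa: comm=stress pid=1234 tgid=1234 ngid=0 mem_nodes_allowed=0
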
Signed-off-by: Libo Chen <libo.chen@...cle.com>
---
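To exercise the new tracepoint, pin a task's memory to a single node
via cpuset.mems and enable the event through tracefs. A rough recipe,
assuming CONFIG_NUMA_BALANCING=y with kernel.numa_balancing enabled,
cgroup v2 mounted at /sys/fs/cgroup, and tracefs at
/sys/kernel/tracing (the "test" cgroup name is arbitrary):

  # echo +cpuset > /sys/fs/cgroup/cgroup.subtree_control
  # mkdir /sys/fs/cgroup/test
  # echo 0 > /sys/fs/cgroup/test/cpuset.mems
  # echo $$ > /sys/fs/cgroup/test/cgroup.procs
  # echo 1 > /sys/kernel/tracing/events/sched/sched_skip_cpuset_numa/enable
  # cat /sys/kernel/tracing/trace_pipe
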
 include/trace/events/sched.h | 30 ++++++++++++++++++++++++++++++
 kernel/sched/fair.c          |  4 +++-
 2 files changed, 33 insertions(+), 1 deletion(-)

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 8994e97d86c13..25ee542fa0063 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -745,6 +745,36 @@ TRACE_EVENT(sched_skip_vma_numa,
 		  __entry->vm_end,
 		  __print_symbolic(__entry->reason, NUMAB_SKIP_REASON))
 );
+
+TRACE_EVENT(sched_skip_cpuset_numa,
+
+	TP_PROTO(struct task_struct *tsk, nodemask_t *mem_allowed_ptr),
+
+	TP_ARGS(tsk, mem_allowed_ptr),
+
+	TP_STRUCT__entry(
+		__array( char,		comm,		TASK_COMM_LEN	)
+		__field( pid_t,		pid			)
+		__field( pid_t,		tgid			)
+		__field( pid_t,		ngid			)
+		__array( unsigned long,	mem_allowed,	BITS_TO_LONGS(MAX_NUMNODES))
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid  = task_pid_nr(tsk);
+		__entry->tgid = task_tgid_nr(tsk);
+		__entry->ngid = task_numa_group_id(tsk);
+		memcpy(__entry->mem_allowed, mem_allowed_ptr->bits, sizeof(__entry->mem_allowed));
+	),
+
+	TP_printk("comm=%s pid=%d tgid=%d ngid=%d mem_nodes_allowed=%*pbl",
+		  __entry->comm,
+		  __entry->pid,
+		  __entry->tgid,
+		  __entry->ngid,
+		  MAX_NUMNODES, __entry->mem_allowed)
+);
 #endif /* CONFIG_NUMA_BALANCING */
 
 /*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c9903b1b39487..cc892961ce157 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3333,8 +3333,10 @@ static void task_numa_work(struct callback_head *work)
 	 * Memory is pinned to only one NUMA node via cpuset.mems, naturally
 	 * no page can be migrated.
 	 */
-	if (cpusets_enabled() && nodes_weight(cpuset_current_mems_allowed) == 1)
+	if (cpusets_enabled() && nodes_weight(cpuset_current_mems_allowed) == 1) {
+		trace_sched_skip_cpuset_numa(current, &cpuset_current_mems_allowed);
 		return;
+	}
 
 	if (!mm->numa_next_scan) {
 		mm->numa_next_scan = now +
--
2.43.5