lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20250717062036.432243-5-adamli@os.amperecomputing.com>
Date: Thu, 17 Jul 2025 06:20:34 +0000
From: Adam Li <adamli@...amperecomputing.com>
To: mingo@...hat.com,
	peterz@...radead.org,
	juri.lelli@...hat.com,
	vincent.guittot@...aro.org
Cc: dietmar.eggemann@....com,
	rostedt@...dmis.org,
	bsegall@...gle.com,
	mgorman@...e.de,
	vschneid@...hat.com,
	cl@...ux.com,
	linux-kernel@...r.kernel.org,
	patches@...erecomputing.com,
	shkaushik@...erecomputing.com,
	Adam Li <adamli@...amperecomputing.com>
Subject: [RFC PATCH v2 4/6] sched/fair: Make update_sg_wakeup_stats() helper functions handle NULL pointers

update_sg_wakeup_stats() uses a set of helper functions:
  cpu_load_without(struct task_struct *p),
  cpu_runnable_without(struct task_struct *p),
  cpu_util_without(struct task_struct *p),
  task_running_on_cpu(struct task_struct *p),
  idle_cpu_without(struct task_struct *p).

update_sg_lb_stats() uses similar helper functions, without the 'p'
argument: cpu_load(), cpu_runnable(), cpu_util_cfs(), idle_cpu().

Make update_sg_wakeup_stats() helper functions handle the case when
'p==NULL', so that update_sg_lb_stats() can use the same helper functions.

This is the first step to unify update_sg_wakeup_stats() and
update_sg_lb_stats().

Signed-off-by: Adam Li <adamli@...amperecomputing.com>
---
 kernel/sched/fair.c | 95 ++++++++++++++++++++++++---------------------
 1 file changed, 50 insertions(+), 45 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index db9ec6a6acdf..69dac5b337d8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7250,7 +7250,8 @@ static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p)
 	unsigned int load;
 
 	/* Task has no contribution or is new */
-	if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
+	if (!p || cpu_of(rq) != task_cpu(p) ||
+	    !READ_ONCE(p->se.avg.last_update_time))
 		return cpu_load(rq);
 
 	cfs_rq = &rq->cfs;
@@ -7273,7 +7274,8 @@ static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p)
 	unsigned int runnable;
 
 	/* Task has no contribution or is new */
-	if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
+	if (!p || cpu_of(rq) != task_cpu(p) ||
+	    !READ_ONCE(p->se.avg.last_update_time))
 		return cpu_runnable(rq);
 
 	cfs_rq = &rq->cfs;
@@ -7285,6 +7287,51 @@ static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p)
 	return runnable;
 }
 
+/*
+ * task_running_on_cpu - return 1 if @p is running on @cpu.
+ */
+
+static unsigned int task_running_on_cpu(int cpu, struct task_struct *p)
+{
+	/* Task has no contribution or is new */
+	if (!p || cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
+		return 0;
+
+	if (task_on_rq_queued(p))
+		return 1;
+
+	return 0;
+}
+
+/**
+ * idle_cpu_without - would a given CPU be idle without p ?
+ * @cpu: the processor on which idleness is tested.
+ * @p: task which should be ignored.
+ *
+ * Return: 1 if the CPU would be idle. 0 otherwise.
+ */
+static int idle_cpu_without(int cpu, struct task_struct *p)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (!p)
+		return idle_cpu(cpu);
+
+	if (rq->curr != rq->idle && rq->curr != p)
+		return 0;
+
+	/*
+	 * rq->nr_running can't be used but an updated version without the
+	 * impact of p on cpu must be used instead. The updated nr_running
+	 * must be computed and tested before calling idle_cpu_without().
+	 */
+
+	if (rq->ttwu_pending)
+		return 0;
+
+	return 1;
+}
+
 static unsigned long capacity_of(int cpu)
 {
 	return cpu_rq(cpu)->cpu_capacity;
@@ -8099,7 +8146,7 @@ unsigned long cpu_util_cfs_boost(int cpu)
 static unsigned long cpu_util_without(int cpu, struct task_struct *p)
 {
 	/* Task has no contribution or is new */
-	if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
+	if (!p || cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
 		p = NULL;
 
 	return cpu_util(cpu, p, -1, 0);
@@ -10631,48 +10678,6 @@ static inline enum fbq_type fbq_classify_rq(struct rq *rq)
 
 struct sg_lb_stats;
 
-/*
- * task_running_on_cpu - return 1 if @p is running on @cpu.
- */
-
-static unsigned int task_running_on_cpu(int cpu, struct task_struct *p)
-{
-	/* Task has no contribution or is new */
-	if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
-		return 0;
-
-	if (task_on_rq_queued(p))
-		return 1;
-
-	return 0;
-}
-
-/**
- * idle_cpu_without - would a given CPU be idle without p ?
- * @cpu: the processor on which idleness is tested.
- * @p: task which should be ignored.
- *
- * Return: 1 if the CPU would be idle. 0 otherwise.
- */
-static int idle_cpu_without(int cpu, struct task_struct *p)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	if (rq->curr != rq->idle && rq->curr != p)
-		return 0;
-
-	/*
-	 * rq->nr_running can't be used but an updated version without the
-	 * impact of p on cpu must be used instead. The updated nr_running
-	 * be computed and tested before calling idle_cpu_without().
-	 */
-
-	if (rq->ttwu_pending)
-		return 0;
-
-	return 1;
-}
-
 /*
  * update_sg_wakeup_stats - Update sched_group's statistics for wakeup.
  * @sd: The sched_domain level to look for idlest group.
-- 
2.34.1


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ