Message-Id: <20230613052004.2836135-3-void@manifault.com>
Date:   Tue, 13 Jun 2023 00:20:03 -0500
From:   David Vernet <void@...ifault.com>
To:     linux-kernel@...r.kernel.org
Cc:     mingo@...hat.com, peterz@...radead.org, juri.lelli@...hat.com,
        vincent.guittot@...aro.org, rostedt@...dmis.org,
        dietmar.eggemann@....com, bsegall@...gle.com, mgorman@...e.de,
        bristot@...hat.com, vschneid@...hat.com, joshdon@...gle.com,
        roman.gushchin@...ux.dev, tj@...nel.org, kernel-team@...a.com
Subject: [RFC PATCH 2/3] sched/fair: Add SWQUEUE sched feature and skeleton calls

For certain workloads in CFS, CPU utilization is of the utmost
importance. For example, at Meta, our main web workload benefits from a
1 - 1.5% improvement in RPS, and a 1 - 2% improvement in p99 latency,
when CPU utilization is pushed as high as possible.

This is likely useful for any workload with long slices, or for any
workload where avoiding migration is unlikely to result in improved
cache locality.

We will soon be enabling more aggressive load balancing via a new
feature called swqueue, which places tasks into a FIFO queue on the
wakeup path, and then dequeues them when a core goes idle, before
invoking newidle_balance(). We don't want to enable the feature by
default, so this patch defines and declares a new scheduler feature
called SWQUEUE, which is disabled by default. In addition, we add calls
to empty / skeleton functions in the relevant fair codepaths where
swqueue will be used.

A set of future patches will implement these functions and enable
swqueue on both single- and multi-socket / CCX architectures.
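
To give reviewers a rough picture of the direction, below is an
illustrative sketch (not part of this patch) of what the FIFO and its
enqueue / pop paths could look like. The struct layout, the
p->swqueue_node field, and the rq_swqueue() helper are placeholders
invented for this sketch; the actual definitions are introduced in the
follow-up patches:

/*
 * Illustrative sketch only: a FIFO of woken tasks, protected by a raw
 * spinlock. Field and helper names are placeholders.
 */
struct swqueue {
	struct list_head	list;
	raw_spinlock_t		lock;
};

static void swqueue_enqueue(struct rq *rq, struct task_struct *p,
			    int enq_flags)
{
	struct swqueue *swq = rq_swqueue(rq);	/* placeholder helper */
	unsigned long flags;

	raw_spin_lock_irqsave(&swq->lock, flags);
	list_add_tail(&p->swqueue_node, &swq->list);	/* placeholder field */
	raw_spin_unlock_irqrestore(&swq->lock, flags);
}

/* Pop the oldest queued task, if any, for a CPU that is going idle. */
static struct task_struct *swqueue_pop_task(struct swqueue *swq)
{
	struct task_struct *p;
	unsigned long flags;

	raw_spin_lock_irqsave(&swq->lock, flags);
	p = list_first_entry_or_null(&swq->list, struct task_struct,
				     swqueue_node);
	if (p)
		list_del_init(&p->swqueue_node);
	raw_spin_unlock_irqrestore(&swq->lock, flags);

	return p;
}

In the eventual swqueue_pick_next_task(), a popped task would be
migrated onto the newly idle rq before pick_next_task_fair() retries,
which is why the skeleton call below returns RETRY_TASK when it reports
success. With CONFIG_SCHED_DEBUG, the feature can be toggled at runtime
via 'echo SWQUEUE > /sys/kernel/debug/sched/features' (and NO_SWQUEUE
to disable it again).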

Originally-by: Roman Gushchin <roman.gushchin@...ux.dev>
Signed-off-by: David Vernet <void@...ifault.com>
---
 kernel/sched/fair.c     | 35 +++++++++++++++++++++++++++++++++++
 kernel/sched/features.h |  1 +
 2 files changed, 36 insertions(+)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 292c593fc84f..807986bd6ea6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -140,6 +140,17 @@ static int __init setup_sched_thermal_decay_shift(char *str)
 __setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);
 
 #ifdef CONFIG_SMP
+static void swqueue_enqueue(struct rq *rq, struct task_struct *p,
+			    int enq_flags)
+{}
+static int swqueue_pick_next_task(struct rq *rq, struct rq_flags *rf)
+{
+	return 0;
+}
+
+static void swqueue_remove_task(struct task_struct *p)
+{}
+
 /*
  * For asym packing, by default the lower numbered CPU has higher priority.
  */
@@ -162,6 +173,17 @@ int __weak arch_asym_cpu_priority(int cpu)
  * (default: ~5%)
  */
 #define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078)
+#else
+static void swqueue_enqueue(struct rq *rq, struct task_struct *p,
+			    int enq_flags)
+{}
+static int swqueue_pick_next_task(struct rq *rq, struct rq_flags *rf)
+{
+	return 0;
+}
+
+static void swqueue_remove_task(struct task_struct *p)
+{}
 #endif
 
 #ifdef CONFIG_CFS_BANDWIDTH
@@ -6368,6 +6390,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (!task_new)
 		update_overutilized_status(rq);
 
+	if (sched_feat(SWQUEUE))
+		swqueue_enqueue(rq, p, flags);
+
 enqueue_throttle:
 	assert_list_leaf_cfs_rq(rq);
 
@@ -6449,6 +6474,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 dequeue_throttle:
 	util_est_update(&rq->cfs, p, task_sleep);
 	hrtick_update(rq);
+
+	if (sched_feat(SWQUEUE))
+		swqueue_remove_task(p);
 }
 
 #ifdef CONFIG_SMP
@@ -8155,12 +8183,18 @@ done: __maybe_unused;
 
 	update_misfit_status(p, rq);
 
+	if (sched_feat(SWQUEUE))
+		swqueue_remove_task(p);
+
 	return p;
 
 idle:
 	if (!rf)
 		return NULL;
 
+	if (sched_feat(SWQUEUE) && swqueue_pick_next_task(rq, rf))
+		return RETRY_TASK;
+
 	new_tasks = newidle_balance(rq, rf);
 
 	/*
@@ -12325,6 +12359,7 @@ static void attach_task_cfs_rq(struct task_struct *p)
 
 static void switched_from_fair(struct rq *rq, struct task_struct *p)
 {
+	swqueue_remove_task(p);
 	detach_task_cfs_rq(p);
 }
 
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index ee7f23c76bd3..57b19bc70cd4 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -101,3 +101,4 @@ SCHED_FEAT(LATENCY_WARN, false)
 
 SCHED_FEAT(ALT_PERIOD, true)
 SCHED_FEAT(BASE_SLICE, true)
+SCHED_FEAT(SWQUEUE, false)
-- 
2.40.1
