Date:   Thu, 31 Aug 2023 16:15:06 +0530
From:   K Prateek Nayak <kprateek.nayak@....com>
To:     <void@...ifault.com>
CC:     <linux-kernel@...r.kernel.org>, <peterz@...radead.org>,
        <mingo@...hat.com>, <juri.lelli@...hat.com>,
        <vincent.guittot@...aro.org>, <dietmar.eggemann@....com>,
        <rostedt@...dmis.org>, <bsegall@...gle.com>, <mgorman@...e.de>,
        <bristot@...hat.com>, <vschneid@...hat.com>, <tj@...nel.org>,
        <roman.gushchin@...ux.dev>, <gautham.shenoy@....com>,
        <aaron.lu@...el.com>, <wuyun.abel@...edance.com>,
        <kernel-team@...a.com>, <kprateek.nayak@....com>
Subject: [RFC PATCH 1/3] sched/fair: Move SHARED_RUNQ related structs and definitions into sched.h

Move the struct shared_runq_shard and struct shared_runq definitions,
along with the SHARED_RUNQ_SHARD_SZ and SHARED_RUNQ_MAX_SHARDS macros,
from fair.c into sched.h.

Signed-off-by: K Prateek Nayak <kprateek.nayak@....com>
---
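[Note, not part of the commit message: SHARED_RUNQ_MAX_SHARDS, moved below,
is a ceiling division of NR_CPUS by SHARED_RUNQ_SHARD_SZ. A minimal
user-space sketch of that arithmetic, with NR_CPUS fixed to an arbitrary
illustrative value rather than the kernel's config-derived one:]

#include <stdio.h>

/* Illustrative stand-in for the kernel's config-derived NR_CPUS. */
#define NR_CPUS 16

#define SHARED_RUNQ_SHARD_SZ 6
#define SHARED_RUNQ_MAX_SHARDS \
	((NR_CPUS / SHARED_RUNQ_SHARD_SZ) + (NR_CPUS % SHARED_RUNQ_SHARD_SZ != 0))

int main(void)
{
	/* 16 / 6 == 2 full shards, plus 1 for the 4 leftover CPUs => 3. */
	printf("SHARED_RUNQ_MAX_SHARDS = %d\n", SHARED_RUNQ_MAX_SHARDS);
	return 0;
}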
 kernel/sched/fair.c  | 68 --------------------------------------------
 kernel/sched/sched.h | 68 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 68 insertions(+), 68 deletions(-)
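
[Note, not part of the commit message: the kernel-doc comment moved in the
diff below describes waking tasks being appended to the calling CPU's shard
and idle CPUs scanning shards in O(# shards). The following is a toy
user-space sketch of that shape, using pthread spinlocks and invented
toy_* names purely for illustration; it is not the kernel implementation:]

/* Toy illustration only -- not the kernel code. */
#include <pthread.h>
#include <stddef.h>

#define TOY_NR_SHARDS 3

struct toy_task {
	struct toy_task *next;
	int id;
};

/* One FIFO list plus its own lock per shard, mirroring struct shared_runq_shard. */
struct toy_shard {
	pthread_spinlock_t lock;
	struct toy_task *head, *tail;
};

static struct toy_shard toy_shards[TOY_NR_SHARDS];

static void toy_init(void)
{
	for (int i = 0; i < TOY_NR_SHARDS; i++)
		pthread_spin_init(&toy_shards[i].lock, PTHREAD_PROCESS_PRIVATE);
}

/* Wakeup path: append the task to the calling CPU's shard only. */
static void toy_enqueue(int cpu, struct toy_task *t)
{
	struct toy_shard *s = &toy_shards[cpu % TOY_NR_SHARDS];

	t->next = NULL;
	pthread_spin_lock(&s->lock);
	if (s->tail)
		s->tail->next = t;
	else
		s->head = t;
	s->tail = t;
	pthread_spin_unlock(&s->lock);
}

/* Newidle path: scan every shard once, so the cost is O(# shards). */
static struct toy_task *toy_pull(void)
{
	for (int i = 0; i < TOY_NR_SHARDS; i++) {
		struct toy_shard *s = &toy_shards[i];
		struct toy_task *t;

		pthread_spin_lock(&s->lock);
		t = s->head;
		if (t) {
			s->head = t->next;
			if (!s->head)
				s->tail = NULL;
		}
		pthread_spin_unlock(&s->lock);
		if (t)
			return t;
	}
	return NULL;
}

int main(void)
{
	struct toy_task a = { .id = 1 }, b = { .id = 2 };

	toy_init();
	toy_enqueue(0, &a);
	toy_enqueue(5, &b);	/* lands on shard 5 % 3 == 2 */
	while (toy_pull())
		;		/* an idle "CPU" drains all shards */
	return 0;
}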

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d67d86d3bfdf..bf844ffa79c2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -139,74 +139,6 @@ static int __init setup_sched_thermal_decay_shift(char *str)
 }
 __setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);
 
-/**
- * struct shared_runq - Per-LLC queue structure for enqueuing and migrating
- * runnable tasks within an LLC.
- *
- * struct shared_runq_shard - A structure containing a task list and a spinlock
- * for a subset of cores in a struct shared_runq.
- *
- * WHAT
- * ====
- *
- * This structure enables the scheduler to be more aggressively work
- * conserving, by placing waking tasks on a per-LLC FIFO queue shard that can
- * then be pulled from when another core in the LLC is going to go idle.
- *
- * struct rq stores two pointers in its struct cfs_rq:
- *
- * 1. The per-LLC struct shared_runq which contains one or more shards of
- *    enqueued tasks.
- *
- * 2. The shard inside of the per-LLC struct shared_runq which contains the
- *    list of runnable tasks for that shard.
- *
- * Waking tasks are enqueued in the calling CPU's struct shared_runq_shard in
- * __enqueue_entity(), and are opportunistically pulled from the shared_runq in
- * newidle_balance(). Pulling from shards is an O(# shards) operation.
- *
- * There is currently no task-stealing between shared_runqs in different LLCs,
- * which means that shared_runq is not fully work conserving. This could be
- * added at a later time, with tasks likely only being stolen across
- * shared_runqs on the same NUMA node to avoid violating NUMA affinities.
- *
- * HOW
- * ===
- *
- * A struct shared_runq_shard is comprised of a list, and a spinlock for
- * synchronization.  Given that the critical section for a shared_runq is
- * typically a fast list operation, and that the shared_runq_shard is localized
- * to a subset of cores on a single LLC (plus other cores in the LLC that pull
- * from the shard in newidle_balance()), the spinlock will typically only be
- * contended on workloads that do little else other than hammer the runqueue.
- *
- * WHY
- * ===
- *
- * As mentioned above, the main benefit of shared_runq is that it enables more
- * aggressive work conservation in the scheduler. This can benefit workloads
- * that benefit more from CPU utilization than from L1/L2 cache locality.
- *
- * shared_runqs are segmented across LLCs both to avoid contention on the
- * shared_runq spinlock by minimizing the number of CPUs that could contend on
- * it, as well as to strike a balance between work conservation, and L3 cache
- * locality.
- */
-struct shared_runq_shard {
-	struct list_head list;
-	raw_spinlock_t lock;
-} ____cacheline_aligned;
-
-/* This would likely work better as a configurable knob via debugfs */
-#define SHARED_RUNQ_SHARD_SZ 6
-#define SHARED_RUNQ_MAX_SHARDS \
-	((NR_CPUS / SHARED_RUNQ_SHARD_SZ) + (NR_CPUS % SHARED_RUNQ_SHARD_SZ != 0))
-
-struct shared_runq {
-	unsigned int num_shards;
-	struct shared_runq_shard shards[SHARED_RUNQ_MAX_SHARDS];
-} ____cacheline_aligned;
-
 #ifdef CONFIG_SMP
 
 static DEFINE_PER_CPU(struct shared_runq, shared_runqs);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b504f8f4416b..f50176f720b1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -545,6 +545,74 @@ do {									\
 # define u64_u32_load(var)      u64_u32_load_copy(var, var##_copy)
 # define u64_u32_store(var, val) u64_u32_store_copy(var, var##_copy, val)
 
+/**
+ * struct shared_runq - Per-LLC queue structure for enqueuing and migrating
+ * runnable tasks within an LLC.
+ *
+ * struct shared_runq_shard - A structure containing a task list and a spinlock
+ * for a subset of cores in a struct shared_runq.
+ *
+ * WHAT
+ * ====
+ *
+ * This structure enables the scheduler to be more aggressively work
+ * conserving, by placing waking tasks on a per-LLC FIFO queue shard that can
+ * then be pulled from when another core in the LLC is going to go idle.
+ *
+ * struct rq stores two pointers in its struct cfs_rq:
+ *
+ * 1. The per-LLC struct shared_runq which contains one or more shards of
+ *    enqueued tasks.
+ *
+ * 2. The shard inside of the per-LLC struct shared_runq which contains the
+ *    list of runnable tasks for that shard.
+ *
+ * Waking tasks are enqueued in the calling CPU's struct shared_runq_shard in
+ * __enqueue_entity(), and are opportunistically pulled from the shared_runq in
+ * newidle_balance(). Pulling from shards is an O(# shards) operation.
+ *
+ * There is currently no task-stealing between shared_runqs in different LLCs,
+ * which means that shared_runq is not fully work conserving. This could be
+ * added at a later time, with tasks likely only being stolen across
+ * shared_runqs on the same NUMA node to avoid violating NUMA affinities.
+ *
+ * HOW
+ * ===
+ *
+ * A struct shared_runq_shard is comprised of a list, and a spinlock for
+ * synchronization.  Given that the critical section for a shared_runq is
+ * typically a fast list operation, and that the shared_runq_shard is localized
+ * to a subset of cores on a single LLC (plus other cores in the LLC that pull
+ * from the shard in newidle_balance()), the spinlock will typically only be
+ * contended on workloads that do little else other than hammer the runqueue.
+ *
+ * WHY
+ * ===
+ *
+ * As mentioned above, the main benefit of shared_runq is that it enables more
+ * aggressive work conservation in the scheduler. This can benefit workloads
+ * that benefit more from CPU utilization than from L1/L2 cache locality.
+ *
+ * shared_runqs are segmented across LLCs both to avoid contention on the
+ * shared_runq spinlock by minimizing the number of CPUs that could contend on
+ * it, as well as to strike a balance between work conservation, and L3 cache
+ * locality.
+ */
+struct shared_runq_shard {
+	struct list_head list;
+	raw_spinlock_t lock;
+} ____cacheline_aligned;
+
+/* This would likely work better as a configurable knob via debugfs */
+#define SHARED_RUNQ_SHARD_SZ 6
+#define SHARED_RUNQ_MAX_SHARDS \
+	((NR_CPUS / SHARED_RUNQ_SHARD_SZ) + (NR_CPUS % SHARED_RUNQ_SHARD_SZ != 0))
+
+struct shared_runq {
+	unsigned int num_shards;
+	struct shared_runq_shard shards[SHARED_RUNQ_MAX_SHARDS];
+} ____cacheline_aligned;
+
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
 	struct load_weight	load;
-- 
2.34.1
