lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-Id: <20250208075747.14104-1-15645113830zzh@gmail.com>
Date: Sat,  8 Feb 2025 15:57:48 +0800
From: zihan zhou <15645113830zzh@...il.com>
To: 15645113830zzh@...il.com
Cc: bsegall@...gle.com,
	dietmar.eggemann@....com,
	juri.lelli@...hat.com,
	linux-kernel@...r.kernel.org,
	mgorman@...e.de,
	mingo@...hat.com,
	peterz@...radead.org,
	rostedt@...dmis.org,
	vincent.guittot@...aro.org,
	vschneid@...hat.com
Subject: [PATCH V3 2/2] sched: Reduce the default slice to avoid tasks getting an extra tick

Allow sysctl_sched_base_slice to be updated via debugfs, clamp its value
to a valid range, and update normalized_sysctl_sched_base_slice accordingly.

Rename the function sched_update_scaling to sched_update_scaling_or_slice;
it is no longer related to CONFIG_SMP and now returns void, as the previous
function always returned 0 and there is no need to check the return value
again, just like the function update_sysctl.

Signed-off-by: zihan zhou <15645113830zzh@...il.com>
Reviewed-by: Vincent Guittot <vincent.guittot@...aro.org>
---
 kernel/sched/debug.c | 52 +++++++++++++++++++++++++++++++++++++++++---
 kernel/sched/fair.c  |  8 +++----
 kernel/sched/sched.h |  4 ++--
 3 files changed, 54 insertions(+), 10 deletions(-)

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index fd7e85220715..f40d9d638939 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -166,6 +166,53 @@ static const struct file_operations sched_feat_fops = {
 	.release	= single_release,
 };
 
+static ssize_t sched_base_slice_write(struct file *filp, const char __user *ubuf,
+				   size_t cnt, loff_t *ppos)
+{
+	char buf[16];
+	unsigned int base_slice;
+
+	if (cnt > 15)
+		cnt = 15;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+	buf[cnt] = '\0';
+
+	if (kstrtouint(buf, 10, &base_slice))
+		return -EINVAL;
+
+	base_slice = clamp_t(u64, base_slice,
+					NSEC_PER_MSEC/10,   /* HZ=1000 * 10 */
+					NSEC_PER_MSEC*100); /* HZ=100  / 10 */
+
+	sysctl_sched_base_slice = base_slice;
+
+	sched_update_scaling_or_slice();
+
+	*ppos += cnt;
+	return cnt;
+}
+
+static int sched_base_slice_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "%d\n", sysctl_sched_base_slice);
+	return 0;
+}
+
+static int sched_base_slice_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_base_slice_show, NULL);
+}
+
+static const struct file_operations sched_base_slice_fops = {
+	.open		= sched_base_slice_open,
+	.write		= sched_base_slice_write,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 #ifdef CONFIG_SMP
 
 static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
@@ -188,8 +235,7 @@ static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
 		return -EINVAL;
 
 	sysctl_sched_tunable_scaling = scaling;
-	if (sched_update_scaling())
-		return -EINVAL;
+	sched_update_scaling_or_slice();
 
 	*ppos += cnt;
 	return cnt;
@@ -505,7 +551,7 @@ static __init int sched_init_debug(void)
 	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
 #endif
 
-	debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);
+	debugfs_create_file("base_slice_ns", 0644, debugfs_sched, NULL, &sched_base_slice_fops);
 
 	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
 	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 34e7d09320f7..0fe7c4530a87 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -981,8 +981,8 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 /**************************************************************
  * Scheduling class statistics methods:
  */
-#ifdef CONFIG_SMP
-int sched_update_scaling(void)
+
+void sched_update_scaling_or_slice(void)
 {
 	unsigned int factor = get_update_sysctl_factor();
 
@@ -990,10 +990,8 @@ int sched_update_scaling(void)
 	(normalized_sysctl_##name = sysctl_##name / (factor))
 	WRT_SYSCTL(sched_base_slice);
 #undef WRT_SYSCTL
-
-	return 0;
 }
-#endif
+
 #endif
 
 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c7cf4cc57cdd..3ac73cbd711f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1923,6 +1923,8 @@ init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
 
 #endif /* !CONFIG_NUMA_BALANCING */
 
+extern void sched_update_scaling_or_slice(void);
+
 #ifdef CONFIG_SMP
 
 static inline void
@@ -2088,8 +2090,6 @@ static inline void update_sched_domain_debugfs(void) { }
 static inline void dirty_sched_domain_sysctl(int cpu) { }
 #endif
 
-extern int sched_update_scaling(void);
-
 static inline const struct cpumask *task_user_cpus(struct task_struct *p)
 {
 	if (!p->user_cpus_ptr)
-- 
2.33.0


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ