Message-ID: <20230429145635.GA1495785@hirez.programming.kicks-ass.net>
Date: Sat, 29 Apr 2023 16:56:35 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: chris hyser <chris.hyser@...cle.com>
Cc: linux-kernel@...r.kernel.org, dietmar.eggemann@....com,
vincent.guittot@...aro.org, Chen Yu <yu.c.chen@...el.com>,
Ingo Molnar <mingo@...nel.org>
Subject: Re: [PATCH v4] sched/numa: Fix divide by zero for sysctl_numa_balancing_scan_size.

On Thu, Apr 06, 2023 at 11:26:33AM -0400, chris hyser wrote:
> Commit 6419265899d9 ("sched/fair: Fix division by zero
> sysctl_numa_balancing_scan_size") prevented a divide by zero by using
> sysctl mechanisms to return EINVAL for a sysctl_numa_balancing_scan_size
> value of zero. When moved from a sysctl to a debugfs file, this checking
> was lost.
>
> This patch puts zero checking back in place.
>
> Cc: stable@...r.kernel.org
> Fixes: 8a99b6833c88 ("sched: Move SCHED_DEBUG sysctl to debugfs")
> Tested-by: Chen Yu <yu.c.chen@...el.com>
> Signed-off-by: Chris Hyser <chris.hyser@...cle.com>

I suppose... but is it really worth the hassle? I mean, this is debug
stuff; just don't write 0 into it then?

If we do find we want this (why?!), then should we not invest in a proper
debugfs_create_u32_minmax() or something, so that we don't have to add 40+
lines for everything we want to put limits on?
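
Something along these lines, perhaps; a rough, untested sketch on top of
the existing DEFINE_DEBUGFS_ATTRIBUTE machinery (the helper name, the
descriptor struct and the min/max values below are made up for
illustration):

struct debugfs_u32_minmax {
	u32	*val;
	u32	min;
	u32	max;
};

static int debugfs_u32_minmax_get(void *data, u64 *val)
{
	struct debugfs_u32_minmax *mm = data;

	*val = *mm->val;
	return 0;
}

static int debugfs_u32_minmax_set(void *data, u64 val)
{
	struct debugfs_u32_minmax *mm = data;

	/* reject out-of-range writes once, instead of per-file boilerplate */
	if (val < mm->min || val > mm->max)
		return -EINVAL;

	*mm->val = val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_u32_minmax, debugfs_u32_minmax_get,
			 debugfs_u32_minmax_set, "%llu\n");

void debugfs_create_u32_minmax(const char *name, umode_t mode,
			       struct dentry *parent,
			       struct debugfs_u32_minmax *data)
{
	debugfs_create_file_unsafe(name, mode, parent, data, &fops_u32_minmax);
}

Then the per-file cost is a small descriptor plus one call:

static struct debugfs_u32_minmax numa_scan_size_mm = {
	.val	= &sysctl_numa_balancing_scan_size,
	.min	= 1,
	.max	= UINT_MAX,
};

	debugfs_create_u32_minmax("scan_size_mb", 0644, numa, &numa_scan_size_mm);
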
> ---
> kernel/sched/debug.c | 44 +++++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 43 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
> index 1637b65ba07a..cc6a0172a598 100644
> --- a/kernel/sched/debug.c
> +++ b/kernel/sched/debug.c
> @@ -278,6 +278,48 @@ static const struct file_operations sched_dynamic_fops = {
>
> #endif /* CONFIG_PREEMPT_DYNAMIC */
>
> +#ifdef CONFIG_NUMA_BALANCING
> +
> +static ssize_t sched_numa_scan_write(struct file *filp, const char __user *ubuf,
> +				     size_t cnt, loff_t *ppos)
> +{
> +	int err;
> +	unsigned int scan_size;
> +
> +	err = kstrtouint_from_user(ubuf, cnt, 10, &scan_size);
> +	if (err)
> +		return err;
> +
> +	if (!scan_size)
> +		return -EINVAL;
> +
> +	sysctl_numa_balancing_scan_size = scan_size;
> +
> +	*ppos += cnt;
> +	return cnt;
> +}
> +
> +static int sched_numa_scan_show(struct seq_file *m, void *v)
> +{
> +	seq_printf(m, "%u\n", sysctl_numa_balancing_scan_size);
> +	return 0;
> +}
> +
> +static int sched_numa_scan_open(struct inode *inode, struct file *filp)
> +{
> +	return single_open(filp, sched_numa_scan_show, NULL);
> +}
> +
> +static const struct file_operations sched_numa_scan_fops = {
> +	.open		= sched_numa_scan_open,
> +	.write		= sched_numa_scan_write,
> +	.read		= seq_read,
> +	.llseek		= seq_lseek,
> +	.release	= single_release,
> +};
> +
> +#endif /* CONFIG_NUMA_BALANCING */
> +
> __read_mostly bool sched_debug_verbose;
>
> static const struct seq_operations sched_debug_sops;
> @@ -332,7 +374,7 @@ static __init int sched_init_debug(void)
> 	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
> 	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
> 	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
> -	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
> +	debugfs_create_file("scan_size_mb", 0644, numa, NULL, &sched_numa_scan_fops);
> 	debugfs_create_u32("hot_threshold_ms", 0644, numa, &sysctl_numa_balancing_hot_threshold);
> #endif
>
> --
> 2.31.1
>