[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20190822165125.GW2369@hirez.programming.kicks-ass.net>
Date: Thu, 22 Aug 2019 18:51:25 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Alessio Balsini <balsini@...roid.com>
Cc: mingo@...nel.org, juri.lelli@...hat.com,
linux-kernel@...r.kernel.org, dietmar.eggemann@....com,
luca.abeni@...tannapisa.it, bristot@...hat.com, dvyukov@...gle.com,
tglx@...utronix.de, vpillai@...italocean.com, rostedt@...dmis.org,
kernel-team@...roid.com
Subject: Re: [RFC][PATCH 01/13] sched/deadline: Impose global limits on
sched_attr::sched_period
On Thu, Aug 22, 2019 at 01:29:49PM +0100, Alessio Balsini wrote:
> Yes, that's exactly what I meant!
> What about this refactoring?
Makes sense; and now I spot a race (and sched_rt_handler() from which I
copied this is susceptible too):
> +int sched_dl_period_handler(struct ctl_table *table, int write,
> + void __user *buffer, size_t *lenp,
> + loff_t *ppos)
> +{
> + unsigned int old_max, old_min;
> + static DEFINE_MUTEX(mutex);
> + int ret;
> +
> + mutex_lock(&mutex);
> + old_max = sysctl_sched_dl_period_max;
> + old_min = sysctl_sched_dl_period_min;
> +
> + ret = proc_douintvec(table, write, buffer, lenp, ppos);
Here the sysctl_* values have been changed, interleave with:
> + if (!ret && write) {
> + u64 max = (u64)sysctl_sched_dl_period_max * NSEC_PER_USEC;
> + u64 min = (u64)sysctl_sched_dl_period_min * NSEC_PER_USEC;
> +
> + if (min < 1ULL << DL_SCALE || max < min) {
> + sysctl_sched_dl_period_max = old_max;
> + sysctl_sched_dl_period_min = old_min;
> + ret = -EINVAL;
> + }
> + }
> + mutex_unlock(&mutex);
> +
> + return ret;
> +}
> @@ -2675,12 +2713,21 @@ bool __checkparam_dl(const struct sched_attr *attr)
> attr->sched_period & (1ULL << 63))
> return false;
>
> + period = attr->sched_period;
> + if (!period)
> + period = attr->sched_deadline;
> +
> /* runtime <= deadline <= period (if period != 0) */
> - if ((attr->sched_period != 0 &&
> - attr->sched_period < attr->sched_deadline) ||
> + if (period < attr->sched_deadline ||
> attr->sched_deadline < attr->sched_runtime)
> return false;
>
> + max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
> + min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
this, and we're using unvalidated numbers.
> + if (period < min || period > max)
> + return false;
> +
> return true;
> }
Something like the completely untested (again, sorry) below ought to
cure that I think; the same needs doing to sched_rt_handler() I'm
thinking.
---
Subject: sched/deadline: Impose global limits on sched_attr::sched_period
From: Alessio Balsini <balsini@...roid.com>
Date: Thu, 22 Aug 2019 13:29:49 +0100
There are two DoS scenarios with SCHED_DEADLINE related to
sched_attr::sched_period:
- since access-control only looks at utilization and density, a very
large period can allow a very large runtime, which in turn can
incur a very large latency to lower priority tasks.
- for very short periods we can end up spending more time programming
the hardware timer than actually running the task.
Mitigate these by imposing limits on the period.
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Signed-off-by: Alessio Balsini <balsini@...roid.com>
Cc: rostedt@...dmis.org
Cc: mingo@...nel.org
Cc: luca.abeni@...tannapisa.it
Cc: bristot@...hat.com
Cc: vpillai@...italocean.com
Cc: kernel-team@...roid.com
Cc: juri.lelli@...hat.com
Cc: dietmar.eggemann@....com
Cc: dvyukov@...gle.com
Cc: tglx@...utronix.de
---
include/linux/sched/sysctl.h | 7 +++++
kernel/sched/deadline.c | 58 +++++++++++++++++++++++++++++++++++++++++--
kernel/sysctl.c | 14 ++++++++++
3 files changed, 77 insertions(+), 2 deletions(-)
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -56,6 +56,13 @@ int sched_proc_update_handler(struct ctl
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;
+extern unsigned int sysctl_sched_dl_period_max;
+extern unsigned int sysctl_sched_dl_period_min;
+
+extern int sched_dl_period_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
#ifdef CONFIG_UCLAMP_TASK
extern unsigned int sysctl_sched_uclamp_util_min;
extern unsigned int sysctl_sched_uclamp_util_max;
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2633,6 +2633,49 @@ void __getparam_dl(struct task_struct *p
}
/*
+ * Default limits for DL period: on the top end we guard against small util
+ * tasks still getting ridiculously long effective runtimes, on the bottom end we
+ * guard against timer DoS.
+ */
+unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
+unsigned int sysctl_sched_dl_period_min = 100; /* 100 us */
+
+int sched_dl_period_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ unsigned int new_max, new_min;
+ struct ctl_table new_table;
+ static DEFINE_MUTEX(mutex);
+ int ret;
+
+ mutex_lock(&mutex);
+ new_max = sysctl_sched_dl_period_max;
+ new_min = sysctl_sched_dl_period_min;
+ new_table = *table;
+ if (new_table.data == &sysctl_sched_dl_period_max)
+ new_table.data = &new_max;
+ else
+ new_table.data = &new_min;
+
+ ret = proc_douintvec(&new_table, write, buffer, lenp, ppos);
+ if (!ret && write) {
+ u64 max = (u64)new_max * NSEC_PER_USEC;
+ u64 min = (u64)new_min * NSEC_PER_USEC;
+
+ if (min > 1ULL << DL_SCALE && max > min) {
+ WRITE_ONCE(sysctl_sched_dl_period_max, new_max);
+ WRITE_ONCE(sysctl_sched_dl_period_min, new_min);
+ } else {
+ ret = -EINVAL;
+ }
+ }
+ mutex_unlock(&mutex);
+
+ return ret;
+}
+
+/*
* This function validates the new parameters of a -deadline task.
* We ask for the deadline not being zero, and greater or equal
* than the runtime, as well as the period of being zero or
@@ -2644,6 +2687,8 @@ void __getparam_dl(struct task_struct *p
*/
bool __checkparam_dl(const struct sched_attr *attr)
{
+ u64 period, max, min;
+
/* special dl tasks don't actually use any parameter */
if (attr->sched_flags & SCHED_FLAG_SUGOV)
return true;
@@ -2667,12 +2712,21 @@ bool __checkparam_dl(const struct sched_
attr->sched_period & (1ULL << 63))
return false;
+ period = attr->sched_period;
+ if (!period)
+ period = attr->sched_deadline;
+
/* runtime <= deadline <= period (if period != 0) */
- if ((attr->sched_period != 0 &&
- attr->sched_period < attr->sched_deadline) ||
+ if (period < attr->sched_deadline ||
attr->sched_deadline < attr->sched_runtime)
return false;
+ max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
+ min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
+
+ if (period < min || period > max)
+ return false;
+
return true;
}
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -443,6 +443,20 @@ static struct ctl_table kern_table[] = {
.proc_handler = sched_rt_handler,
},
{
+ .procname = "sched_deadline_period_max_us",
+ .data = &sysctl_sched_dl_period_max,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_dl_period_handler,
+ },
+ {
+ .procname = "sched_deadline_period_min_us",
+ .data = &sysctl_sched_dl_period_min,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_dl_period_handler,
+ },
+ {
.procname = "sched_rr_timeslice_ms",
.data = &sysctl_sched_rr_timeslice,
.maxlen = sizeof(int),
Powered by blists - more mailing lists