Message-Id: <20210910122917.386721069@linuxfoundation.org>
Date: Fri, 10 Sep 2021 14:30:09 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org,
"Peter Zijlstra (Intel)" <peterz@...radead.org>,
Valentin Schneider <Valentin.Schneider@....com>,
Patrick Schaaf <bof@....de>
Subject: [PATCH 5.4 06/37] kthread: Fix PF_KTHREAD vs to_kthread() race

From: Peter Zijlstra <peterz@...radead.org>

commit 3a7956e25e1d7b3c148569e78895e1f3178122a9 upstream.

The kthread_is_per_cpu() construct relies on only being called on
PF_KTHREAD tasks (per the WARN in to_kthread). This gives rise to the
following usage pattern:

  if ((p->flags & PF_KTHREAD) && kthread_is_per_cpu(p))

However, as reported by syzkaller, this is broken. The scenario is:

  CPU0                                CPU1 (running p)

  (p->flags & PF_KTHREAD) // true

                                      begin_new_exec()
                                        me->flags &= ~(PF_KTHREAD|...);

  kthread_is_per_cpu(p)
    to_kthread(p)
      WARN(!(p->flags & PF_KTHREAD) <-- *SPLAT*

Introduce __to_kthread(), which omits the WARN and checks both values.

Use this to remove the problematic pattern for kthread_is_per_cpu()
and fix a number of other kthread_*() functions that have similar
issues but are currently not used in ways that would expose the
problem.

Notably, kthread_func() is only ever called on 'current', while
kthread_probe_data() is only used for PF_WQ_WORKER, which implies the
task is from kthread_create*().
Fixes: ac687e6e8c26 ("kthread: Extract KTHREAD_IS_PER_CPU")
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Reviewed-by: Valentin Schneider <Valentin.Schneider@....com>
Link: https://lkml.kernel.org/r/YH6WJc825C4P0FCK@hirez.programming.kicks-ass.net
Signed-off-by: Patrick Schaaf <bof@....de>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
 kernel/kthread.c    |   43 +++++++++++++++++++++++++++++--------------
 kernel/sched/fair.c |    2 +-
 2 files changed, 30 insertions(+), 15 deletions(-)
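For readers skimming the backport, the shape of the change, condensed
from the hunks below, is roughly the following. This is an illustrative
sketch of what the diff does, not extra code to apply:

  /*
   * New helper (see the kernel/kthread.c hunk): check PF_KTHREAD and
   * the struct kthread pointer together, instead of leaving the
   * PF_KTHREAD test to callers, and skip the WARN in to_kthread().
   */
  static inline struct kthread *__to_kthread(struct task_struct *p)
  {
  	void *kthread = (__force void *)p->set_child_tid;
  	if (kthread && !(p->flags & PF_KTHREAD))
  		kthread = NULL;
  	return kthread;
  }

  /*
   * Caller side (see the kernel/sched/fair.c hunk): the separate
   * PF_KTHREAD pre-check disappears, because kthread_is_per_cpu() now
   * uses __to_kthread() and simply returns false for non-kthreads.
   */
  	if (kthread_is_per_cpu(p))
  		return 0;
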
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -76,6 +76,25 @@ static inline struct kthread *to_kthread
 	return (__force void *)k->set_child_tid;
 }
 
+/*
+ * Variant of to_kthread() that doesn't assume @p is a kthread.
+ *
+ * Per construction; when:
+ *
+ *   (p->flags & PF_KTHREAD) && p->set_child_tid
+ *
+ * the task is both a kthread and struct kthread is persistent. However
+ * PF_KTHREAD on it's own is not, kernel_thread() can exec() (See umh.c and
+ * begin_new_exec()).
+ */
+static inline struct kthread *__to_kthread(struct task_struct *p)
+{
+	void *kthread = (__force void *)p->set_child_tid;
+	if (kthread && !(p->flags & PF_KTHREAD))
+		kthread = NULL;
+	return kthread;
+}
+
 void free_kthread_struct(struct task_struct *k)
 {
 	struct kthread *kthread;
@@ -176,10 +195,11 @@ void *kthread_data(struct task_struct *t
  */
 void *kthread_probe_data(struct task_struct *task)
 {
-	struct kthread *kthread = to_kthread(task);
+	struct kthread *kthread = __to_kthread(task);
 	void *data = NULL;
 
-	probe_kernel_read(&data, &kthread->data, sizeof(data));
+	if (kthread)
+		probe_kernel_read(&data, &kthread->data, sizeof(data));
 	return data;
 }
 
@@ -490,9 +510,9 @@ void kthread_set_per_cpu(struct task_str
 	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
 }
 
-bool kthread_is_per_cpu(struct task_struct *k)
+bool kthread_is_per_cpu(struct task_struct *p)
 {
-	struct kthread *kthread = to_kthread(k);
+	struct kthread *kthread = __to_kthread(p);
 
 	if (!kthread)
 		return false;
@@ -1272,11 +1292,9 @@ EXPORT_SYMBOL(kthread_destroy_worker);
  */
 void kthread_associate_blkcg(struct cgroup_subsys_state *css)
 {
-	struct kthread *kthread;
+	struct kthread *kthread = __to_kthread(current);
+
 
-	if (!(current->flags & PF_KTHREAD))
-		return;
-	kthread = to_kthread(current);
 	if (!kthread)
 		return;
 
@@ -1298,13 +1316,10 @@ EXPORT_SYMBOL(kthread_associate_blkcg);
  */
 struct cgroup_subsys_state *kthread_blkcg(void)
 {
-	struct kthread *kthread;
+	struct kthread *kthread = __to_kthread(current);
 
-	if (current->flags & PF_KTHREAD) {
-		kthread = to_kthread(current);
-		if (kthread)
-			return kthread->blkcg_css;
-	}
+	if (kthread)
+		return kthread->blkcg_css;
 	return NULL;
 }
 EXPORT_SYMBOL(kthread_blkcg);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7301,7 +7301,7 @@ int can_migrate_task(struct task_struct
 		return 0;
 
 	/* Disregard pcpu kthreads; they are where they need to be. */
-	if ((p->flags & PF_KTHREAD) && kthread_is_per_cpu(p))
+	if (kthread_is_per_cpu(p))
 		return 0;
 
 	if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {