Message-Id: <20220603173455.441537-1-tadeusz.struk@linaro.org>
Date: Fri, 3 Jun 2022 10:34:55 -0700
From: Tadeusz Struk <tadeusz.struk@...aro.org>
To: Tejun Heo <tj@...nel.org>
Cc: Tadeusz Struk <tadeusz.struk@...aro.org>,
Michal Koutny <mkoutny@...e.com>,
Zefan Li <lizefan.x@...edance.com>,
Johannes Weiner <hannes@...xchg.org>,
Christian Brauner <brauner@...nel.org>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andrii@...nel.org>,
Martin KaFai Lau <kafai@...com>,
Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...nel.org>, cgroups@...r.kernel.org,
netdev@...r.kernel.org, bpf@...r.kernel.org,
stable@...r.kernel.org, linux-kernel@...r.kernel.org,
syzbot+e42ae441c3b10acf9e9d@...kaller.appspotmail.com
Subject: [PATCH] cgroup: serialize css kill and release paths

Syzbot found a corrupted-list bug that can be triggered from
cgroup_subtree_control_write(cgrp). The reproducer writes to the
cgroup.subtree_control file, which invokes
cgroup_apply_control_enable()->css_create()->css_populate_dir(), which
then fails with a fault-injected -ENOMEM.

In this scenario css_killed_work_fn() gets enqueued via
cgroup_apply_control_disable(cgrp)->kill_css(css), and the error path
bails out to cgroup_kn_unlock(). cgroup_kn_unlock() then calls
cgroup_put(cgrp)->css_put(&cgrp->self), which tries to enqueue
css_release_work_fn() on the same work struct of the same css instance,
causing the list_add corruption seen in the syzkaller report [1].
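
To make the failure concrete: queue_work() links the work struct's list
entry into the workqueue's pending list, and because css_release()
re-runs INIT_WORK() on a work struct that kill_css() already queued, the
second queue_work() links the same node in twice. A minimal userspace
sketch of that effect (not kernel code; list_add() below mirrors the
include/linux/list.h helper, without the CONFIG_DEBUG_LIST checks that
produced the syzkaller splat):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Simplified kernel-style list_add(), no debug sanity checks. */
static void list_add(struct list_head *new, struct list_head *head)
{
        head->next->prev = new;
        new->next = head->next;
        new->prev = head;
        head->next = new;
}

int main(void)
{
        struct list_head pending = { &pending, &pending }; /* work list */
        struct list_head work = { NULL, NULL };            /* one item  */

        list_add(&work, &pending);  /* kill_css() path queues the work  */
        list_add(&work, &pending);  /* css_put() path queues it again   */

        /* The node now points at itself; a worker would loop forever. */
        printf("work.next == &work: %s\n",
               work.next == &work ? "yes (corrupted)" : "no");
        return 0;
}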

Fix this by serializing the css kill and release paths. css_release()
checks whether css_killed_work_fn() has already been enqueued for the
css and queues css_release_work_fn() only if it hasn't. Otherwise
css_release() sets the CSS_REL_LATER flag for that css, which causes
css_release_work_fn() to be enqueued after css_killed_work_fn() has
finished.

Two new css flags implement this serialization mechanism:
* CSS_KILL_ENQED, which is set when css_killed_work_fn() is enqueued, and
* CSS_REL_LATER, which, when set, causes css_release_work_fn() to be
  scheduled after css_killed_work_fn() has finished.

A new per-css spinlock protects the integrity of these flags; the sketch
below the link models the resulting ordering.
[1] https://syzkaller.appspot.com/bug?id=e26e54d6eac9d9fb50b221ec3e4627b327465dbd
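
The intended ordering can be modeled in userspace roughly as follows (an
illustrative sketch only: the flag names mirror the patch, while the
pthread mutex and the enqueue() stub stand in for css->lock and
queue_work() on cgroup_destroy_wq):

#include <pthread.h>
#include <stdio.h>

#define CSS_KILL_ENQED (1 << 5)
#define CSS_REL_LATER  (1 << 6)

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int flags;

static void enqueue(const char *work) { printf("queue_work: %s\n", work); }

/* Models css_killed_ref_fn(): mark the kill work as enqueued. */
static void *kill_path(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        flags |= CSS_KILL_ENQED;
        enqueue("css_killed_work_fn");
        pthread_mutex_unlock(&lock);
        return NULL;
}

/* Models css_release(): defer instead of double-enqueuing. */
static void *release_path(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        if (!(flags & CSS_KILL_ENQED))
                enqueue("css_release_work_fn");
        else
                flags |= CSS_REL_LATER;
        pthread_mutex_unlock(&lock);
        return NULL;
}

/* Models the tail of css_killed_work_fn(): run the deferred release. */
static void kill_work_done(void)
{
        pthread_mutex_lock(&lock);
        flags &= ~CSS_KILL_ENQED;
        if (flags & CSS_REL_LATER) {
                flags &= ~CSS_REL_LATER;
                enqueue("css_release_work_fn");
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, kill_path, NULL);
        pthread_join(t, NULL);
        pthread_create(&t, NULL, release_path, NULL);
        pthread_join(t, NULL);
        kill_work_done();
        return 0;
}
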
Cc: Tejun Heo <tj@...nel.org>
Cc: Michal Koutny <mkoutny@...e.com>
Cc: Zefan Li <lizefan.x@...edance.com>
Cc: Johannes Weiner <hannes@...xchg.org>
Cc: Christian Brauner <brauner@...nel.org>
Cc: Alexei Starovoitov <ast@...nel.org>
Cc: Daniel Borkmann <daniel@...earbox.net>
Cc: Andrii Nakryiko <andrii@...nel.org>
Cc: Martin KaFai Lau <kafai@...com>
Cc: Song Liu <songliubraving@...com>
Cc: Yonghong Song <yhs@...com>
Cc: John Fastabend <john.fastabend@...il.com>
Cc: KP Singh <kpsingh@...nel.org>
Cc: <cgroups@...r.kernel.org>
Cc: <netdev@...r.kernel.org>
Cc: <bpf@...r.kernel.org>
Cc: <stable@...r.kernel.org>
Cc: <linux-kernel@...r.kernel.org>
Reported-and-tested-by: syzbot+e42ae441c3b10acf9e9d@...kaller.appspotmail.com
Fixes: 8f36aaec9c92 ("cgroup: Use rcu_work instead of explicit rcu and work item")
Signed-off-by: Tadeusz Struk <tadeusz.struk@...aro.org>
---
 include/linux/cgroup-defs.h |  4 ++++
 kernel/cgroup/cgroup.c      | 35 ++++++++++++++++++++++++++++++++---
 2 files changed, 36 insertions(+), 3 deletions(-)

diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 1bfcfb1af352..8dc8b4edb242 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -53,6 +53,8 @@ enum {
 	CSS_RELEASED	= (1 << 2), /* refcnt reached zero, released */
 	CSS_VISIBLE	= (1 << 3), /* css is visible to userland */
 	CSS_DYING	= (1 << 4), /* css is dying */
+	CSS_KILL_ENQED	= (1 << 5), /* kill work enqueued for the css */
+	CSS_REL_LATER	= (1 << 6), /* release needs to be done after kill */
 };
 
 /* bits in struct cgroup flags field */
@@ -162,6 +164,8 @@ struct cgroup_subsys_state {
 	 */
 	int id;
 
+	/* lock to protect flags */
+	spinlock_t lock;
 	unsigned int flags;
 
 	/*
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 1779ccddb734..a0ceead4b390 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -5210,8 +5210,23 @@ static void css_release(struct percpu_ref *ref)
 	struct cgroup_subsys_state *css =
 		container_of(ref, struct cgroup_subsys_state, refcnt);
 
-	INIT_WORK(&css->destroy_work, css_release_work_fn);
-	queue_work(cgroup_destroy_wq, &css->destroy_work);
+	spin_lock_bh(&css->lock);
+
+	/*
+	 * Check if the css_killed_work_fn work has already been scheduled
+	 * for this css and enqueue css_release_work_fn only if it hasn't.
+	 * Otherwise set the CSS_REL_LATER flag, which causes the release
+	 * to be enqueued after css_killed_work_fn is finished.
+	 * This prevents list corruption from enqueuing two instances of
+	 * the same work struct on the same workqueue, cgroup_destroy_wq.
+	 */
+	if (!(css->flags & CSS_KILL_ENQED)) {
+		INIT_WORK(&css->destroy_work, css_release_work_fn);
+		queue_work(cgroup_destroy_wq, &css->destroy_work);
+	} else {
+		css->flags |= CSS_REL_LATER;
+	}
+	spin_unlock_bh(&css->lock);
 }
 
 static void init_and_link_css(struct cgroup_subsys_state *css,
@@ -5230,6 +5245,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
 	INIT_LIST_HEAD(&css->rstat_css_node);
 	css->serial_nr = css_serial_nr_next++;
 	atomic_set(&css->online_cnt, 0);
+	spin_lock_init(&css->lock);
 
 	if (cgroup_parent(cgrp)) {
 		css->parent = cgroup_css(cgroup_parent(cgrp), ss);
@@ -5545,10 +5561,12 @@ int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode)
  */
 static void css_killed_work_fn(struct work_struct *work)
 {
-	struct cgroup_subsys_state *css =
+	struct cgroup_subsys_state *css_killed, *css =
 		container_of(work, struct cgroup_subsys_state, destroy_work);
 
 	mutex_lock(&cgroup_mutex);
+	css_killed = css;
+	css_killed->flags &= ~CSS_KILL_ENQED;
 
 	do {
 		offline_css(css);
@@ -5557,6 +5575,14 @@ static void css_killed_work_fn(struct work_struct *work)
 		css = css->parent;
 	} while (css && atomic_dec_and_test(&css->online_cnt));
 
+	spin_lock_bh(&css_killed->lock);
+	if (css_killed->flags & CSS_REL_LATER) {
+		/* If css_release() was delayed for this css, enqueue it now. */
+		INIT_WORK(&css_killed->destroy_work, css_release_work_fn);
+		queue_work(cgroup_destroy_wq, &css_killed->destroy_work);
+		css_killed->flags &= ~CSS_REL_LATER;
+	}
+	spin_unlock_bh(&css_killed->lock);
 	mutex_unlock(&cgroup_mutex);
 }
@@ -5566,10 +5592,13 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
 	struct cgroup_subsys_state *css =
 		container_of(ref, struct cgroup_subsys_state, refcnt);
 
+	spin_lock_bh(&css->lock);
 	if (atomic_dec_and_test(&css->online_cnt)) {
+		css->flags |= CSS_KILL_ENQED;
 		INIT_WORK(&css->destroy_work, css_killed_work_fn);
 		queue_work(cgroup_destroy_wq, &css->destroy_work);
 	}
+	spin_unlock_bh(&css->lock);
 }
 
 /**
--
2.36.1