Date:   Sun, 28 Aug 2016 20:36:02 -0700
From:   Andrei Vagin <avagin@...nvz.org>
To:     Alexander Viro <viro@...iv.linux.org.uk>
Cc:     linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
        Andrei Vagin <avagin@...nvz.org>
Subject: [PATCH] mount: don't execute propagate_umount() many times for same mounts

In the worst case the current complexity of umount_tree() is O(n^3),
because three enumerations are nested (see the sketch below):
* enumerate all mounts in the target tree (propagate_umount)
* enumerate mounts to find where these changes have to
  be propagated (mark_umount_candidates)
* enumerate mounts to find the required mount by parent and dentry
  (__lookup_mnt_last)
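
Schematically the nesting looks like this (simplified pseudocode, not
the exact kernel code; locking and list handling are omitted):

	/* 1) propagate_umount(): for every mount in the unmounted tree */
	list_for_each_entry(mnt, list, mnt_list) {
		/* 2) mark_umount_candidates(): for every peer mount the
		 *    umount event propagates to */
		for (m = propagation_next(parent, parent); m;
		     m = propagation_next(m, parent)) {
			/* 3) __lookup_mnt_last(): scan the hash chain for
			 *    a child mounted at (m, mnt->mnt_mountpoint) */
			child = __lookup_mnt_last(&m->mnt,
						  mnt->mnt_mountpoint);
		}
	}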

The worst case is when all mounts from the tree live in the same shared
peer group; in this case we have to enumerate all mounts on each step.

Here we can optimize the second step: we don't need to repeat it for
mounts which we have already visited while handling previous mounts.
This reduces the complexity of umount_tree() to O(n^2).
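
In other words: with all n mounts in one shared peer group, the first
step runs n times, each propagation walk visits up to n peers, and each
lookup scans a hash chain of up to n entries, giving n * n * n steps.
With the marks, a mount that has already been visited is skipped, so
the propagation walk is not repeated for every mount in the tree; that
removes one factor of n.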

Here is a script to generate such a mount tree:
$ cat run.sh
# make /mnt a shared mount, then bind it into $1 fresh subdirectories
# of itself, so that all mounts end up in one shared peer group
mount -t tmpfs xxx /mnt
mount --make-shared /mnt
for i in `seq $1`; do
	mount --bind /mnt `mktemp -d /mnt/test.XXXXXX`
done
time umount -l /mnt
$ for i in `seq 10 16`; do echo $i; unshare -Urm bash ./run.sh $((2**$i)); done

Here are performance measurements with and without this patch:

mounts | after (sec) | before (sec)
-------+-------------+-------------
1024   | 0.024       | 0.084
2048   | 0.041       | 0.39
4096   | 0.059       | 3.198
8192   | 0.227       | 50.794
16384  | 1.015       | 810
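
At the larger sizes the "after" column grows by roughly 4x per doubling
of the mount count (0.227 -> 1.015), which matches the expected O(n^2)
behaviour, while the "before" column grows much faster (50.794 -> 810).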

This patch is a first step towards fixing CVE-2016-6213. The next step
will be to add a ucount (per-user-namespace limit) for mounts.

Signed-off-by: Andrei Vagin <avagin@...nvz.org>
---
 fs/mount.h     |  2 ++
 fs/namespace.c | 19 ++++++++++++++++---
 fs/pnode.c     | 23 +++++++++++++++++++++--
 3 files changed, 39 insertions(+), 5 deletions(-)
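
A note on the new helper: __lookup_mnt_cont() is a continuation step
for walking a mount hash chain, used roughly like this (an illustrative
pattern mirroring its callers in this patch, not new API beyond what
the diff below adds):

	struct mount *p;

	/* visit every mount hashed at (mnt, dentry) */
	for (p = __lookup_mnt(mnt, dentry); p != NULL;
	     p = __lookup_mnt_cont(p, mnt, dentry)) {
		/* ... inspect p ... */
	}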

diff --git a/fs/mount.h b/fs/mount.h
index 14db05d..b5631bd 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -87,6 +87,8 @@ static inline int is_mounted(struct vfsmount *mnt)
 
 extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *);
 extern struct mount *__lookup_mnt_last(struct vfsmount *, struct dentry *);
+extern struct mount *__lookup_mnt_cont(struct mount *,
+					struct vfsmount *, struct dentry *);
 
 extern int __legitimize_mnt(struct vfsmount *, unsigned);
 extern bool legitimize_mnt(struct vfsmount *, unsigned);
diff --git a/fs/namespace.c b/fs/namespace.c
index 7bb2cda..924cea7 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -649,9 +649,7 @@ struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
 		goto out;
 	if (!(p->mnt.mnt_flags & MNT_UMOUNT))
 		res = p;
-	hlist_for_each_entry_continue(p, mnt_hash) {
-		if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
-			break;
+	for (; p != NULL; p = __lookup_mnt_cont(p, mnt, dentry)) {
 		if (!(p->mnt.mnt_flags & MNT_UMOUNT))
 			res = p;
 	}
@@ -659,6 +657,21 @@ out:
 	return res;
 }
 
+struct mount *__lookup_mnt_cont(struct mount *p,
+				struct vfsmount *mnt, struct dentry *dentry)
+{
+	struct hlist_node *node = p->mnt_hash.next;
+
+	if (!node)
+		return NULL;
+
+	p = hlist_entry(node, struct mount, mnt_hash);
+	if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
+		return NULL;
+
+	return p;
+}
+
 /*
  * lookup_mnt - Return the first child mount mounted at path
  *
diff --git a/fs/pnode.c b/fs/pnode.c
index 9989970..2242aad 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -399,10 +399,24 @@ static void mark_umount_candidates(struct mount *mnt)
 
 	BUG_ON(parent == mnt);
 
+	if (IS_MNT_MARKED(mnt))
+		return;
+
 	for (m = propagation_next(parent, parent); m;
 			m = propagation_next(m, parent)) {
-		struct mount *child = __lookup_mnt_last(&m->mnt,
+		struct mount *child = __lookup_mnt(&m->mnt,
 						mnt->mnt_mountpoint);
+
+		while (child && child->mnt.mnt_flags & MNT_UMOUNT) {
+			/*
+			 * Mark umounted mounts so that __propagate_umount()
+			 * is not called for them again.
+			 */
+			SET_MNT_MARK(child);
+			child = __lookup_mnt_cont(child, &m->mnt,
+							mnt->mnt_mountpoint);
+		}
+
 		if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) {
 			SET_MNT_MARK(child);
 		}
@@ -420,6 +434,9 @@ static void __propagate_umount(struct mount *mnt)
 
 	BUG_ON(parent == mnt);
 
+	if (IS_MNT_MARKED(mnt))
+		return;
+
 	for (m = propagation_next(parent, parent); m;
 			m = propagation_next(m, parent)) {
 
@@ -431,6 +448,8 @@ static void __propagate_umount(struct mount *mnt)
 		 */
 		if (!child || !IS_MNT_MARKED(child))
 			continue;
+		if (child->mnt.mnt_flags & MNT_UMOUNT)
+			continue;
 		CLEAR_MNT_MARK(child);
 		if (list_empty(&child->mnt_mounts)) {
 			list_del_init(&child->mnt_child);
@@ -454,7 +473,7 @@ int propagate_umount(struct list_head *list)
 	list_for_each_entry_reverse(mnt, list, mnt_list)
 		mark_umount_candidates(mnt);
 
-	list_for_each_entry(mnt, list, mnt_list)
+	list_for_each_entry_reverse(mnt, list, mnt_list)
 		__propagate_umount(mnt);
 	return 0;
 }
-- 
2.5.5
