[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20240626201129.272750-3-lkarpins@redhat.com>
Date: Wed, 26 Jun 2024 16:07:49 -0400
From: Lucas Karpinski <lkarpins@...hat.com>
To: viro@...iv.linux.org.uk,
brauner@...nel.org,
jack@...e.cz
Cc: raven@...maw.net,
linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org,
Lucas Karpinski <lkarpins@...hat.com>,
Alexander Larsson <alexl@...hat.com>,
Eric Chanudet <echanude@...hat.com>,
Ian Kent <ikent@...hat.com>
Subject: [RFC v3 1/1] fs/namespace: remove RCU sync for MNT_DETACH umount
When detaching (MNT_DETACH) a filesystem, it should not be necessary to
wait for the grace period before completing the syscall. The
expectation that the filesystem is shut down by the time the syscall
returns does not apply in this case. The synchronize_rcu_expedited() is not
needed in the lazy umount case, so don't use it.
Without the patch, on a 6.10-rc2-rt kernel:
perf stat -r 10 --null --pre 'mount -t tmpfs tmpfs mnt' -- umount mnt
0.07333 +- 0.00615 seconds time elapsed ( +- 8.38% )
perf stat -r 10 --null --pre 'mount -t tmpfs tmpfs mnt' -- umount -l mnt
0.07229 +- 0.00672 seconds time elapsed ( +- 9.29% )
With the patch, on a 6.10-rc2-rt kernel:
perf stat -r 10 --null --pre 'mount -t tmpfs tmpfs mnt' -- umount mnt
0.02834 +- 0.00419 seconds time elapsed ( +- 14.78% )
perf stat -r 10 --null --pre 'mount -t tmpfs tmpfs mnt' -- umount -l mnt
0.0029830 +- 0.0000767 seconds time elapsed ( +- 2.57% )
Signed-off-by: Alexander Larsson <alexl@...hat.com>
Signed-off-by: Eric Chanudet <echanude@...hat.com>
Signed-off-by: Lucas Karpinski <lkarpins@...hat.com>
Suggested-by: Ian Kent <ikent@...hat.com>
---
fs/namespace.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/fs/namespace.c b/fs/namespace.c
index 5a51315c6678..5d889e05dd14 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -78,6 +78,7 @@ static struct kmem_cache *mnt_cache __ro_after_init;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted); /* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
+static bool lazy_unlock = false; /* protected by namespace_sem */
struct mount_kattr {
unsigned int attr_set;
@@ -1555,6 +1556,7 @@ EXPORT_SYMBOL(may_umount);
static void namespace_unlock(void)
{
+ bool lazy;
struct hlist_head head;
struct hlist_node *p;
struct mount *m;
@@ -1563,6 +1565,9 @@ static void namespace_unlock(void)
hlist_move_list(&unmounted, &head);
list_splice_init(&ex_mountpoints, &list);
+ lazy = lazy_unlock;
+ lazy_unlock = false;
+
up_write(&namespace_sem);
shrink_dentry_list(&list);
@@ -1570,7 +1575,8 @@ static void namespace_unlock(void)
if (likely(hlist_empty(&head)))
return;
- synchronize_rcu_expedited();
+ if (!lazy)
+ synchronize_rcu_expedited();
hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
hlist_del(&m->mnt_umount);
@@ -1798,6 +1804,7 @@ static int do_umount(struct mount *mnt, int flags)
}
out:
unlock_mount_hash();
+ lazy_unlock = flags & MNT_DETACH ? true : false;
namespace_unlock();
return retval;
}
--
2.45.2
Powered by blists - more mailing lists