Message-Id: <20230119205521.497401-2-echanude@redhat.com>
Date: Thu, 19 Jan 2023 15:55:21 -0500
From: Eric Chanudet <echanude@...hat.com>
To: Alexander Viro <viro@...iv.linux.org.uk>
Cc: linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
Alexander Larsson <alexl@...hat.com>,
Andrew Halaney <ahalaney@...hat.com>,
Eric Chanudet <echanude@...hat.com>
Subject: [RFC PATCH 1/1] fs/namespace: defer free_mounts from namespace_unlock
From: Alexander Larsson <alexl@...hat.com>
Use call_rcu to defer releasing the umount'ed or detached filesystems
when calling namespace_unlock().
Calling synchronize_rcu_expedited() has a significant cost on RT kernels,
which default to rcupdate.rcu_normal_after_boot=1 and therefore treat
expedited grace periods as normal (much longer) ones after boot.
For example, on a 6.2-rt1 kernel:
perf stat -r 10 --null --pre 'mount -t tmpfs tmpfs mnt' -- umount mnt
0.07464 +- 0.00396 seconds time elapsed ( +- 5.31% )
With this change applied:
perf stat -r 10 --null --pre 'mount -t tmpfs tmpfs mnt' -- umount mnt
0.00162604 +- 0.00000637 seconds time elapsed ( +- 0.39% )
Waiting for the grace period before completing the syscall does not seem
mandatory: the umount'ed struct mounts are queued up for release on a
separate list and are no longer accessible to subsequent syscalls.
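For reference, a minimal sketch of the call_rcu() deferral pattern this
change relies on, using hypothetical names rather than the fs/namespace.c
symbols (the actual implementation is in the diff below):

	/*
	 * Sketch only: move the objects to be released onto a private
	 * list and free them from an RCU callback after the grace
	 * period, instead of blocking the caller in
	 * synchronize_rcu_expedited().
	 */
	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/list.h>

	struct deferred_release {
		struct rcu_head rcu;
		struct hlist_head list;		/* objects awaiting release */
	};

	static void deferred_release_cb(struct rcu_head *head)
	{
		struct deferred_release *d =
			container_of(head, struct deferred_release, rcu);

		/* walk d->list and release each entry here */
		kfree(d);
	}

	static void queue_deferred_release(struct hlist_head *objects)
	{
		struct deferred_release *d = kmalloc(sizeof(*d), GFP_KERNEL);

		if (!d)
			return;	/* real code falls back to synchronous freeing */
		hlist_move_list(objects, &d->list);
		call_rcu(&d->rcu, deferred_release_cb);
	}

The allocation-failure fallback above mirrors the patch: if no memory is
available for the deferral record, it keeps the old synchronous
synchronize_rcu_expedited() path.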
Signed-off-by: Alexander Larsson <alexl@...hat.com>
Signed-off-by: Eric Chanudet <echanude@...hat.com>
---
fs/namespace.c | 42 +++++++++++++++++++++++++++++++++++-------
1 file changed, 35 insertions(+), 7 deletions(-)
diff --git a/fs/namespace.c b/fs/namespace.c
index ab467ee58341..11d219a6e83c 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -44,6 +44,11 @@ static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;
+struct mount_delayed_release {
+ struct rcu_head rcu;
+ struct hlist_head release_list;
+};
+
static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
@@ -1582,11 +1587,31 @@ int may_umount(struct vfsmount *mnt)
EXPORT_SYMBOL(may_umount);
-static void namespace_unlock(void)
+static void free_mounts(struct hlist_head *mount_list)
{
- struct hlist_head head;
struct hlist_node *p;
struct mount *m;
+
+ hlist_for_each_entry_safe(m, p, mount_list, mnt_umount) {
+ hlist_del(&m->mnt_umount);
+ mntput(&m->mnt);
+ }
+}
+
+static void delayed_mount_release(struct rcu_head *head)
+{
+ struct mount_delayed_release *drelease =
+ container_of(head, struct mount_delayed_release, rcu);
+
+ free_mounts(&drelease->release_list);
+ kfree(drelease);
+}
+
+static void namespace_unlock(void)
+{
+ struct hlist_head head;
+ struct mount_delayed_release *drelease;
+
LIST_HEAD(list);
hlist_move_list(&unmounted, &head);
@@ -1599,12 +1624,15 @@ static void namespace_unlock(void)
if (likely(hlist_empty(&head)))
return;
- synchronize_rcu_expedited();
-
- hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
- hlist_del(&m->mnt_umount);
- mntput(&m->mnt);
+ drelease = kmalloc(sizeof(*drelease), GFP_KERNEL);
+ if (unlikely(!drelease)) {
+ synchronize_rcu_expedited();
+ free_mounts(&head);
+ return;
}
+
+ hlist_move_list(&head, &drelease->release_list);
+ call_rcu(&drelease->rcu, delayed_mount_release);
}
static inline void namespace_lock(void)
--
2.39.0