Message-ID: <20250210143351.31119-1-luis@igalia.com>
Date: Mon, 10 Feb 2025 14:33:51 +0000
From: Luis Henriques <luis@...lia.com>
To: Miklos Szeredi <miklos@...redi.hu>
Cc: linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org,
Matt Harvey <mharvey@...ptrading.com>,
Bernd Schubert <bschubert@....com>,
Luis Henriques <luis@...lia.com>
Subject: [RFC PATCH v3] fuse: add new function to invalidate cache for all inodes
Currently userspace is able to notify the kernel to invalidate the cache
for an inode.  This means that, if all the inodes in a filesystem need to
be invalidated, userspace has to iterate through all of them and send the
kernel one such notification per inode.
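
For reference, with the existing interface a libfuse-based server has to
loop over every nodeid it has handed out to the kernel and send one
notification per inode, roughly as in the sketch below (illustrative
only: the known_inodes[]/num_known_inodes bookkeeping is hypothetical
server-side state, and the libfuse 3 low-level API is assumed):

  #define FUSE_USE_VERSION 34
  #include <fuse_lowlevel.h>

  /* Hypothetical bookkeeping: every nodeid the server has handed out. */
  extern fuse_ino_t known_inodes[];
  extern size_t num_known_inodes;

  static void invalidate_all_inodes(struct fuse_session *se)
  {
          size_t i;

          /* offset=0, len=0: invalidate the attributes and the whole
           * data cache of each inode, one request at a time. */
          for (i = 0; i < num_known_inodes; i++)
                  fuse_lowlevel_notify_inval_inode(se, known_inodes[i],
                                                   0, 0);
  }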
This patch adds a new option that allows userspace to invalidate all the
inodes with a single notification operation.  In addition to invalidating
all the inodes, it also shrinks the superblock's dcache.
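
With this patch, the loop above collapses into a single notification,
e.g. (again a sketch; it assumes the libfuse helper forwards a zero
nodeid to the kernel unchanged):

  /* FUSE_INVAL_ALL_INODES (nodeid 0) invalidates every inode in the
   * filesystem; offset and len are ignored by the kernel in this case. */
  fuse_lowlevel_notify_inval_inode(se, FUSE_INVAL_ALL_INODES, 0, 0);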
Signed-off-by: Luis Henriques <luis@...lia.com>
---
* Changes since v2
Use the new helper from fuse_reverse_inval_inode(), as suggested by Bernd.
Also updated the patch description as per checkpatch.pl's suggestion.
* Changes since v1
As suggested by Bernd, this patch v2 simply adds a helper function that
will make it easier to replace most of its code with a call to
super_iter_inodes() when Dave Chinner's patch[1] eventually gets merged
(a sketch of that follow-up is included after the link below).
[1] https://lore.kernel.org/r/20241002014017.3801899-3-david@fromorbit.com
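
Purely as an illustration of that intent (not part of this patch; it
assumes an iterator shaped roughly like
int super_iter_inodes(struct super_block *sb, ino_iter_fn fn, void *data, int flags)
with a callback int (*ino_iter_fn)(struct inode *inode, void *data);
the final API in [1] may well differ), the helper would let the
open-coded s_inodes walk shrink to something like:

  static int inval_inode_cb(struct inode *inode, void *data)
  {
          inval_single_inode(inode, data);
          return 0;
  }

  static int fuse_reverse_inval_all(struct fuse_conn *fc)
  {
          struct fuse_mount *fm;
          struct super_block *sb;
          struct inode *inode;
          int err;

          /* Same superblock lookup as in this patch. */
          inode = fuse_ilookup(fc, FUSE_ROOT_ID, NULL);
          if (!inode)
                  return -ENOENT;
          fm = get_fuse_mount(inode);
          iput(inode);
          if (!fm)
                  return -ENOENT;
          sb = fm->sb;

          /* The open-coded inode-list walk becomes a single call. */
          err = super_iter_inodes(sb, inval_inode_cb, fc, 0);
          if (!err)
                  shrink_dcache_sb(sb);
          return err;
  }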
 fs/fuse/inode.c           | 67 +++++++++++++++++++++++++++++++++++----
 include/uapi/linux/fuse.h |  3 ++
 2 files changed, 63 insertions(+), 7 deletions(-)
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index e9db2cb8c150..45b9fbb54d42 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -547,25 +547,78 @@ struct inode *fuse_ilookup(struct fuse_conn *fc, u64 nodeid,
 	return NULL;
 }
 
+static void inval_single_inode(struct inode *inode, struct fuse_conn *fc)
+{
+	struct fuse_inode *fi;
+
+	fi = get_fuse_inode(inode);
+	spin_lock(&fi->lock);
+	fi->attr_version = atomic64_inc_return(&fc->attr_version);
+	spin_unlock(&fi->lock);
+	fuse_invalidate_attr(inode);
+	forget_all_cached_acls(inode);
+}
+
+static int fuse_reverse_inval_all(struct fuse_conn *fc)
+{
+	struct fuse_mount *fm;
+	struct super_block *sb;
+	struct inode *inode, *old_inode = NULL;
+
+	inode = fuse_ilookup(fc, FUSE_ROOT_ID, NULL);
+	if (!inode)
+		return -ENOENT;
+
+	fm = get_fuse_mount(inode);
+	iput(inode);
+	if (!fm)
+		return -ENOENT;
+	sb = fm->sb;
+
+	spin_lock(&sb->s_inode_list_lock);
+	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+		spin_lock(&inode->i_lock);
+		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
+		    !atomic_read(&inode->i_count)) {
+			spin_unlock(&inode->i_lock);
+			continue;
+		}
+
+		__iget(inode);
+		spin_unlock(&inode->i_lock);
+		spin_unlock(&sb->s_inode_list_lock);
+		iput(old_inode);
+
+		inval_single_inode(inode, fc);
+
+		old_inode = inode;
+		cond_resched();
+		spin_lock(&sb->s_inode_list_lock);
+	}
+	spin_unlock(&sb->s_inode_list_lock);
+	iput(old_inode);
+
+	shrink_dcache_sb(sb);
+
+	return 0;
+}
+
 int fuse_reverse_inval_inode(struct fuse_conn *fc, u64 nodeid,
 			     loff_t offset, loff_t len)
 {
-	struct fuse_inode *fi;
 	struct inode *inode;
 	pgoff_t pg_start;
 	pgoff_t pg_end;
 
+	if (nodeid == FUSE_INVAL_ALL_INODES)
+		return fuse_reverse_inval_all(fc);
+
 	inode = fuse_ilookup(fc, nodeid, NULL);
 	if (!inode)
 		return -ENOENT;
 
-	fi = get_fuse_inode(inode);
-	spin_lock(&fi->lock);
-	fi->attr_version = atomic64_inc_return(&fc->attr_version);
-	spin_unlock(&fi->lock);
+	inval_single_inode(inode, fc);
 
-	fuse_invalidate_attr(inode);
-	forget_all_cached_acls(inode);
 	if (offset >= 0) {
 		pg_start = offset >> PAGE_SHIFT;
 		if (len <= 0)
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 5e0eb41d967e..e5852b63f99f 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -669,6 +669,9 @@ enum fuse_notify_code {
 	FUSE_NOTIFY_CODE_MAX,
 };
 
+/* The nodeid to request to invalidate all inodes */
+#define FUSE_INVAL_ALL_INODES 0
+
 /* The read buffer is required to be at least 8k, but may be much larger */
 #define FUSE_MIN_READ_BUFFER 8192