Message-Id: <20210107075314.62683-5-wenyang@linux.alibaba.com>
Date: Thu, 7 Jan 2021 15:53:11 +0800
From: Wen Yang <wenyang@...ux.alibaba.com>
To: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Sasha Levin <sashal@...nel.org>
Cc: Xunlei Pang <xlpang@...ux.alibaba.com>,
linux-kernel@...r.kernel.org,
"Eric W. Biederman" <ebiederm@...ssion.com>,
stable@...r.kernel.org, Wen Yang <wenyang@...ux.alibaba.com>
Subject: [PATCH 4.19 4/7] proc: Generalize proc_sys_prune_dcache into proc_prune_siblings_dcache
From: "Eric W. Biederman" <ebiederm@...ssion.com>
[ Upstream commit 26dbc60f385ff9cff475ea2a3bad02e80fd6fa43 ]
This prepares the way for allowing the pid part of proc to use this
dcache pruning code as well.
Signed-off-by: Eric W. Biederman <ebiederm@...ssion.com>
Cc: <stable@...r.kernel.org> # 4.19.x
Signed-off-by: Wen Yang <wenyang@...ux.alibaba.com>
---
 fs/proc/inode.c       | 38 ++++++++++++++++++++++++++++++++++++++
 fs/proc/internal.h    |  1 +
 fs/proc/proc_sysctl.c | 35 +----------------------------------
 3 files changed, 40 insertions(+), 34 deletions(-)
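
Note for reviewers, not part of the change itself: the moved code walks the
caller's sibling_inodes hlist under RCU, unhooks each proc inode under the
caller-supplied lock, pins the superblock and the inode, drops RCU, and calls
d_prune_aliases() before releasing both references. A later pid-side user
could reuse the helper roughly as in the sketch below; every example_*
identifier is made up for illustration and does not exist in the tree, the
only real API used is proc_prune_siblings_dcache() added by this patch.

/*
 * Sketch only, assuming a hypothetical future change under fs/proc/.
 */
#include <linux/spinlock.h>
#include "internal.h"	/* proc_prune_siblings_dcache() */

/*
 * Hypothetical pid-side container that chains the proc inodes referring
 * to it through proc_inode.sibling_inodes, as ctl_table_header does.
 */
struct example_pid_entry {
	struct hlist_head inodes;
};

static DEFINE_SPINLOCK(example_pid_lock);	/* hypothetical lock */

static void example_flush_pid_dcache(struct example_pid_entry *e)
{
	/* Same call proc_sys_prune_dcache() now makes for sysctl. */
	proc_prune_siblings_dcache(&e->inodes, &example_pid_lock);
}
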
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index e5334ed..fffc7e4 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -112,6 +112,44 @@ void __init proc_init_kmemcache(void)
 	BUILD_BUG_ON(sizeof(struct proc_dir_entry) >= SIZEOF_PDE);
 }
 
+void proc_prune_siblings_dcache(struct hlist_head *inodes, spinlock_t *lock)
+{
+	struct inode *inode;
+	struct proc_inode *ei;
+	struct hlist_node *node;
+	struct super_block *sb;
+
+	rcu_read_lock();
+	for (;;) {
+		node = hlist_first_rcu(inodes);
+		if (!node)
+			break;
+		ei = hlist_entry(node, struct proc_inode, sibling_inodes);
+		spin_lock(lock);
+		hlist_del_init_rcu(&ei->sibling_inodes);
+		spin_unlock(lock);
+
+		inode = &ei->vfs_inode;
+		sb = inode->i_sb;
+		if (!atomic_inc_not_zero(&sb->s_active))
+			continue;
+		inode = igrab(inode);
+		rcu_read_unlock();
+		if (unlikely(!inode)) {
+			deactivate_super(sb);
+			rcu_read_lock();
+			continue;
+		}
+
+		d_prune_aliases(inode);
+		iput(inode);
+		deactivate_super(sb);
+
+		rcu_read_lock();
+	}
+	rcu_read_unlock();
+}
+
 static int proc_show_options(struct seq_file *seq, struct dentry *root)
 {
 	struct super_block *sb = root->d_sb;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index d922c01..6cae472 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -210,6 +210,7 @@ struct pde_opener {
 extern const struct inode_operations proc_pid_link_inode_operations;
 
 void proc_init_kmemcache(void);
+void proc_prune_siblings_dcache(struct hlist_head *inodes, spinlock_t *lock);
 void set_proc_pid_nlink(void);
 extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
 extern int proc_fill_super(struct super_block *, void *data, int flags);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 0f578f6..57b16bf 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -264,40 +264,7 @@ static void unuse_table(struct ctl_table_header *p)
 
 static void proc_sys_prune_dcache(struct ctl_table_header *head)
 {
-	struct inode *inode;
-	struct proc_inode *ei;
-	struct hlist_node *node;
-	struct super_block *sb;
-
-	rcu_read_lock();
-	for (;;) {
-		node = hlist_first_rcu(&head->inodes);
-		if (!node)
-			break;
-		ei = hlist_entry(node, struct proc_inode, sibling_inodes);
-		spin_lock(&sysctl_lock);
-		hlist_del_init_rcu(&ei->sibling_inodes);
-		spin_unlock(&sysctl_lock);
-
-		inode = &ei->vfs_inode;
-		sb = inode->i_sb;
-		if (!atomic_inc_not_zero(&sb->s_active))
-			continue;
-		inode = igrab(inode);
-		rcu_read_unlock();
-		if (unlikely(!inode)) {
-			deactivate_super(sb);
-			rcu_read_lock();
-			continue;
-		}
-
-		d_prune_aliases(inode);
-		iput(inode);
-		deactivate_super(sb);
-
-		rcu_read_lock();
-	}
-	rcu_read_unlock();
+	proc_prune_siblings_dcache(&head->inodes, &sysctl_lock);
 }
 
 /* called under sysctl_lock, will reacquire if has to wait */
--
1.8.3.1