Message-Id: <20220504095123.295859-5-imran.f.khan@oracle.com>
Date: Wed, 4 May 2022 19:51:22 +1000
From: Imran Khan <imran.f.khan@oracle.com>
To: tj@kernel.org, gregkh@linuxfoundation.org, viro@zeniv.linux.org.uk
Cc: linux-kernel@vger.kernel.org
Subject: [PATCH v2 4/5] kernfs: Introduce interface to access global kernfs_open_file_mutex.

This allows the underlying mutex locking to be changed without needing to
change the users of the lock. For example, the next patch modifies this
interface to use hashed mutexes in place of a single global
kernfs_open_file_mutex.

Signed-off-by: Imran Khan <imran.f.khan@oracle.com>
---
fs/kernfs/file.c | 50 +++++++++++++++++++++++++++++++++---------------
1 file changed, 35 insertions(+), 15 deletions(-)
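Note (not part of the patch): to illustrate what the indirection buys us,
below is a minimal sketch of how kernfs_open_file_mutex_ptr() could later
return one of several hashed mutexes instead of the single global one. The
array size, hash choice, and names here are illustrative assumptions, not
the exact code of the next patch; every lock/unlock site in the diff below
would stay unchanged.

	/* Illustrative sketch only: a hashed replacement for the accessor. */
	#include <linux/hash.h>
	#include <linux/mutex.h>

	#define NR_KERNFS_LOCK_BITS	6	/* assumed: 2^6 = 64 mutexes */

	/* each element must be mutex_init()'d at boot; init loop omitted */
	static struct mutex kernfs_open_file_mutexes[1 << NR_KERNFS_LOCK_BITS];

	static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn)
	{
		/* hash the node pointer to spread contention across the array */
		int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);

		return &kernfs_open_file_mutexes[idx];
	}

Because every user takes the lock through kernfs_open_file_mutex_lock() and
unlocks the returned pointer, swapping the accessor body is the only change
such a follow-up needs.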
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 96c8493003b6..7a60074ec0a0 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -49,6 +49,22 @@ struct kernfs_open_node {

static LLIST_HEAD(kernfs_notify_list);

+static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn)
+{
+ return &kernfs_open_file_mutex;
+}
+
+static inline struct mutex *kernfs_open_file_mutex_lock(struct kernfs_node *kn)
+{
+ struct mutex *lock;
+
+ lock = kernfs_open_file_mutex_ptr(kn);
+
+ mutex_lock(lock);
+
+ return lock;
+}
+
/*
* Raw deref RCU protected kn->attr.open.
* The caller guarantees that @on will not vanish in the middle of this
@@ -67,7 +83,7 @@ static struct kernfs_open_node *kernfs_deref_on_raw(struct kernfs_node *kn)
static struct kernfs_open_node *kernfs_deref_on_protected(struct kernfs_node *kn)
{
return rcu_dereference_protected(kn->attr.open,
- lockdep_is_held(&kernfs_open_file_mutex));
+ lockdep_is_held(kernfs_open_file_mutex_ptr(kn)));
}

/*
@@ -78,7 +94,7 @@ static struct kernfs_open_node *kernfs_deref_on_protected(struct kernfs_node *kn
static struct kernfs_open_node *kernfs_check_on_protected(struct kernfs_node *kn)
{
return rcu_dereference_check(kn->attr.open,
- lockdep_is_held(&kernfs_open_file_mutex));
+ lockdep_is_held(kernfs_open_file_mutex_ptr(kn)));
}

static struct kernfs_open_file *kernfs_of(struct file *file)
@@ -551,19 +567,20 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
struct kernfs_open_file *of)
{
struct kernfs_open_node *on, *new_on = NULL;
+ struct mutex *mutex = NULL;

- mutex_lock(&kernfs_open_file_mutex);
+ mutex = kernfs_open_file_mutex_lock(kn);
on = kernfs_deref_on_protected(kn);

if (on) {
list_add_tail(&of->list, &on->files);
- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);
return 0;
} else {
/* not there, initialize a new one */
new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
if (!new_on) {
- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);
return -ENOMEM;
}
atomic_set(&new_on->event, 1);
@@ -572,7 +589,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
list_add_tail(&of->list, &new_on->files);
rcu_assign_pointer(kn->attr.open, new_on);
}
- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);

return 0;
}
@@ -594,12 +611,13 @@ static void kernfs_unlink_open_file(struct kernfs_node *kn,
struct kernfs_open_file *of)
{
struct kernfs_open_node *on;
+ struct mutex *mutex = NULL;

- mutex_lock(&kernfs_open_file_mutex);
+ mutex = kernfs_open_file_mutex_lock(kn);
on = kernfs_deref_on_protected(kn);

if (!on) {
- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);
return;
}

@@ -611,7 +629,7 @@ static void kernfs_unlink_open_file(struct kernfs_node *kn,
kfree_rcu(on, rcu_head);
}

- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);
}

static int kernfs_fop_open(struct inode *inode, struct file *file)
@@ -753,7 +771,7 @@ static void kernfs_release_file(struct kernfs_node *kn,
* here because drain path may be called from places which can
* cause circular dependency.
*/
- lockdep_assert_held(&kernfs_open_file_mutex);
+ lockdep_assert_held(kernfs_open_file_mutex_ptr(kn));

if (!of->released) {
/*
@@ -770,11 +788,12 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp)
{
struct kernfs_node *kn = inode->i_private;
struct kernfs_open_file *of = kernfs_of(filp);
+ struct mutex *mutex = NULL;

if (kn->flags & KERNFS_HAS_RELEASE) {
- mutex_lock(&kernfs_open_file_mutex);
+ mutex = kernfs_open_file_mutex_lock(kn);
kernfs_release_file(kn, of);
- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);
}

kernfs_unlink_open_file(kn, of);
@@ -789,6 +808,7 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
{
struct kernfs_open_node *on;
struct kernfs_open_file *of;
+ struct mutex *mutex = NULL;

if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE)))
return;
@@ -804,10 +824,10 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
if (!rcu_access_pointer(kn->attr.open))
return;

- mutex_lock(&kernfs_open_file_mutex);
+ mutex = kernfs_open_file_mutex_lock(kn);
on = kernfs_check_on_protected(kn);

if (!on) {
- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);
return;
}
@@ -821,7 +841,7 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
kernfs_release_file(kn, of);
}

- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);
}

/*
--
2.30.2