Message-Id: <20220317072612.163143-5-imran.f.khan@oracle.com>
Date: Thu, 17 Mar 2022 18:26:08 +1100
From: Imran Khan <imran.f.khan@...cle.com>
To: tj@...nel.org, viro@...iv.linux.org.uk
Cc: gregkh@...uxfoundation.org, akpm@...ux-foundation.org,
linux-kernel@...r.kernel.org
Subject: [RESEND PATCH v7 4/8] kernfs: Replace global kernfs_open_node_lock with hashed spinlocks.
In the current kernfs design a single spinlock, kernfs_open_node_lock,
protects kernfs_node->attr.open, i.e. the kernfs_open_node instances
corresponding to sysfs attributes. So tasks that are opening or closing
otherwise unrelated sysfs files can contend on this spinlock. The contention
is more apparent on large systems with a few hundred CPUs, where most CPUs
are running tasks that are opening, accessing or closing sysfs files at any
point in time.
Using hashed spinlocks in place of the single global spinlock reduces
contention on that lock and hence provides better scalability. Moreover, as
these hashed spinlocks are not part of kernfs_node objects, there is no
significant change in the memory utilization of kernfs-based file systems
like sysfs, cgroupfs etc.
Modify the interface introduced in the previous patch to make use of the
hashed spinlocks. Use the kernfs_node address as the hashing key.
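For illustration, a minimal sketch of the lookup pattern this patch adopts,
mirroring the kernfs_open_node_spinlock_ptr() hunk in the diff below.
NR_KERNFS_LOCK_BITS, NR_KERNFS_LOCKS and the kernfs_locks array come from the
earlier patch in this series; the plain spin_lock()/spin_unlock() caller shown
here is illustrative only and is not taken from the patch itself.

	#include <linux/hash.h>		/* hash_ptr() */
	#include <linux/spinlock.h>
	#include "kernfs-internal.h"	/* kernfs_locks, struct kernfs_node */

	/*
	 * Sketch only: map a kernfs_node to its hashed spinlock.  Two nodes
	 * contend only when their addresses hash to the same slot, so opens
	 * and closes of unrelated files no longer serialize on one global
	 * lock.
	 */
	static inline spinlock_t *open_node_spinlock_ptr(struct kernfs_node *kn)
	{
		/* hash_ptr() folds the node address into NR_KERNFS_LOCK_BITS bits */
		int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);

		return &kernfs_locks->open_node_locks[idx].lock;
	}

	/* Illustrative caller; the real helpers may use an irq-disabling variant. */
	static void example_update_open_node(struct kernfs_node *kn)
	{
		spinlock_t *lock = open_node_spinlock_ptr(kn);

		spin_lock(lock);
		/* ... manipulate kn->attr.open under the per-bucket lock ... */
		spin_unlock(lock);
	}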
Signed-off-by: Imran Khan <imran.f.khan@...cle.com>
---
fs/kernfs/file.c | 9 ---------
fs/kernfs/kernfs-internal.h | 6 +++---
fs/kernfs/mount.c | 4 +++-
include/linux/kernfs.h | 10 +++++++++-
4 files changed, 15 insertions(+), 14 deletions(-)
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 1658bfa048df..95426df9f030 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -18,15 +18,6 @@
#include "kernfs-internal.h"
-/*
- * kernfs_node->attr.open points to kernfs_open_node. attr.open is
- * protected by kernfs_open_node_lock.
- *
- * filp->private_data points to seq_file whose ->private points to
- * kernfs_open_file.
- */
-DEFINE_SPINLOCK(kernfs_open_node_lock);
-
struct kernfs_open_node {
atomic_t refcnt;
atomic_t event;
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index 60404a93c28a..25c3329bd60e 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -169,8 +169,6 @@ extern const struct inode_operations kernfs_symlink_iops;
*/
extern struct kernfs_global_locks *kernfs_locks;
-extern spinlock_t kernfs_open_node_lock;
-
static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn)
{
int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);
@@ -191,7 +189,9 @@ static inline struct mutex *kernfs_open_file_mutex_lock(struct kernfs_node *kn)
static inline spinlock_t *kernfs_open_node_spinlock_ptr(struct kernfs_node *kn)
{
- return &kernfs_open_node_lock;
+ int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);
+
+ return &kernfs_locks->open_node_locks[idx].lock;
}
static inline spinlock_t *kernfs_open_node_spinlock(struct kernfs_node *kn)
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index fa3fa22c95b2..809b738739b1 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -395,8 +395,10 @@ void __init kernfs_lock_init(void)
kernfs_locks = kmalloc(sizeof(struct kernfs_global_locks), GFP_KERNEL);
WARN_ON(!kernfs_locks);
- for (count = 0; count < NR_KERNFS_LOCKS; count++)
+ for (count = 0; count < NR_KERNFS_LOCKS; count++) {
mutex_init(&kernfs_locks->open_file_mutex[count].lock);
+ spin_lock_init(&kernfs_locks->open_node_locks[count].lock);
+ }
}
void __init kernfs_init(void)
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 1de54f4bdcc5..e82e57c007e9 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -19,6 +19,7 @@
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <linux/cache.h>
+#include <linux/spinlock.h>
struct file;
struct dentry;
@@ -75,20 +76,27 @@ struct kernfs_iattrs;
* kernfs_open_file.
* kernfs_open_files are chained at kernfs_open_node->files, which is
* protected by kernfs_open_file_mutex.lock.
+ *
+ * kernfs_node->attr.open points to kernfs_open_node. attr.open is
+ * protected by kernfs_open_node_lock.lock.
*/
struct kernfs_open_file_mutex {
struct mutex lock;
} ____cacheline_aligned_in_smp;
+struct kernfs_open_node_lock {
+ spinlock_t lock;
+} ____cacheline_aligned_in_smp;
+
/*
* To reduce possible contention in sysfs access, arising due to single
* locks, use an array of locks and use kernfs_node object address as
* hash keys to get the index of these locks.
*/
-
struct kernfs_global_locks {
struct kernfs_open_file_mutex open_file_mutex[NR_KERNFS_LOCKS];
+ struct kernfs_open_node_lock open_node_locks[NR_KERNFS_LOCKS];
};
enum kernfs_node_type {
--
2.30.2