[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <151019772760.30101.8513274540570798315.stgit@noble>
Date: Thu, 09 Nov 2017 14:22:07 +1100
From: NeilBrown <neilb@...e.com>
To: Al Viro <viro@...iv.linux.org.uk>
Cc: linux-fsdevel@...r.kernel.org,
Linus Torvalds <torvalds@...ux-foundation.org>,
linux-kernel@...r.kernel.org
Subject: [PATCH 2/3] Improve fairness when locking the per-superblock s_anon
list
bit-spin-locks, as used for dcache hash chains, are not fair.
This is not a problem for the dcache hash table as different CPUs are
likely to access different entries in the hash table so high contention
is not expected.
However anonymous dentries (created by NFSD) all live on a single hash
chain "s_anon" and the bitlock on this can be highly contended, resulting
in soft-lockup warnings.
So introduce a per-sb (fair) spinlock and take it before grabbing the
bitlock on s_anon. This provides fairness and makes the warnings go away.
Signed-off-by: NeilBrown <neilb@...e.com>
---
fs/dcache.c | 11 ++++++++++-
fs/super.c | 1 +
include/linux/fs.h | 1 +
3 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index f90141387f01..d5952306206b 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -51,6 +51,8 @@
* - the dcache hash table
* s_anon bl list spinlock protects:
* - the s_anon list (see __d_drop)
+ * dentry->d_sb->s_anon_lock protects:
+ * - the s_anon bl bitlock, ensuring fairness.
* dentry->d_sb->s_dentry_lru_lock protects:
* - the dcache lru lists and counters
* d_lock protects:
@@ -484,7 +486,12 @@ void __d_drop(struct dentry *dentry)
else
b = d_hash(dentry->d_name.hash);
- hlist_bl_lock(b);
+ if (b == &dentry->d_sb->s_anon) {
+ spin_lock(&dentry->d_sb->s_anon_lock);
+ hlist_bl_lock(b);
+ spin_unlock(&dentry->d_sb->s_anon_lock);
+ } else
+ hlist_bl_lock(b);
__hlist_bl_del(&dentry->d_hash);
dentry->d_hash.pprev = NULL;
hlist_bl_unlock(b);
@@ -1965,7 +1972,9 @@ static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
spin_lock(&tmp->d_lock);
__d_set_inode_and_type(tmp, inode, add_flags);
hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
+ spin_lock(&tmp->d_sb->s_anon_lock);
hlist_bl_lock(&tmp->d_sb->s_anon);
+ spin_unlock(&tmp->d_sb->s_anon_lock);
hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
hlist_bl_unlock(&tmp->d_sb->s_anon);
spin_unlock(&tmp->d_lock);
diff --git a/fs/super.c b/fs/super.c
index 994db21f59bf..af644ae93445 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -209,6 +209,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
if (s->s_user_ns != &init_user_ns)
s->s_iflags |= SB_I_NODEV;
INIT_HLIST_NODE(&s->s_instances);
+ spin_lock_init(&s->s_anon_lock);
INIT_HLIST_BL_HEAD(&s->s_anon);
mutex_init(&s->s_sync_lock);
INIT_LIST_HEAD(&s->s_inodes);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 885266aae2d7..9df9bace53fb 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1359,6 +1359,7 @@ struct super_block {
const struct fscrypt_operations *s_cop;
+ spinlock_t s_anon_lock; /* needed for fairness */
struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
Powered by blists - more mailing lists