Message-Id: <1460768127-31822-11-git-send-email-viro@ZenIV.linux.org.uk>
Date: Sat, 16 Apr 2016 01:55:23 +0100
From: Al Viro <viro@...IV.linux.org.uk>
To: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org
Subject: [PATCH 11/15] beginning of transition to parallel lookups - marking in-lookup dentries
From: Al Viro <viro@...iv.linux.org.uk>
Dentries are marked as in-lookup when a (would-be) parallel lookup is about
to pass them to the actual ->lookup(); they are unmarked when
	* __d_add() is about to make the dentry hashed, positive or not.
	* __d_move() (from d_splice_alias(), directly or via
	  __d_unalias()) puts a preexisting dentry in its place
	* the caller of ->lookup() sees it escape all of the above.
It is a bug (WARN_ON, actually) if a dentry reaches the final dput() or
d_instantiate() while still marked as such.  The caller-side lifecycle is
sketched below.
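In outline, the caller-side pattern being introduced looks like this (a
sketch of the lookup_slow() change in the fs/namei.c hunk below, not a
drop-in piece of code):

	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_PAR_LOOKUP;	/* mark: lookup in progress */
	spin_unlock(&dentry->d_lock);

	old = inode->i_op->lookup(inode, dentry, flags);

	/* unmark, unless __d_add()/__d_move() has already done it for us */
	d_not_in_lookup(dentry);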
As a result, we are guaranteed that for as long as the flag is set, the
dentry will
	* remain negative and unhashed, with a positive refcount
	* never have its ->d_alias looked at
	* never have its ->d_lru looked at
	* never have its ->d_parent and ->d_name changed
(expressed as assertions in the illustration below).
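Purely as an illustration (nothing in this patch adds such a check), the
invariants above amount to something like:

	/* illustration only: what holds for a dentry found with the flag set */
	if (dentry->d_flags & DCACHE_PAR_LOOKUP) {
		WARN_ON(!d_unhashed(dentry));		/* still unhashed */
		WARN_ON(!d_is_negative(dentry));	/* still negative */
		WARN_ON(!dentry->d_lockref.count);	/* pinned by the lookup */
	}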
Right now we have at most one such dentry for any given parent directory.
With parallel lookups that restriction will weaken to
	* only exists while the parent is locked shared
	* at most one with a given (parent,name) pair (comparison of
	  names is according to ->d_compare(); see the sketch below)
	* only exists when there's no hashed dentry with the same
	  (parent,name)
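To make the second point concrete, "comparison according to ->d_compare()"
means roughly the following; the helper is hypothetical, meant only to
illustrate the rule, and is not part of this patch:

	/* hypothetical: does dentry match (parent, name)? */
	static bool same_name(const struct dentry *parent,
			      const struct dentry *dentry,
			      const struct qstr *name)
	{
		if (parent->d_flags & DCACHE_OP_COMPARE)
			return !parent->d_op->d_compare(parent, dentry,
							dentry->d_name.len,
							dentry->d_name.name,
							name);
		return dentry->d_name.len == name->len &&
		       !memcmp(dentry->d_name.name, name->name, name->len);
	}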
The transition will take the next several commits; unfortunately, we'll
only be able to switch the parent directory lock to an rwsem at the end
of this series.  The reason for not making it a single patch is to
simplify review.
Signed-off-by: Al Viro <viro@...iv.linux.org.uk>
---
fs/dcache.c | 12 ++++++++++++
fs/namei.c | 4 ++++
include/linux/dcache.h | 13 +++++++++++++
3 files changed, 29 insertions(+)
diff --git a/fs/dcache.c b/fs/dcache.c
index 33cad8a..5cea3cb 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -761,6 +761,8 @@ repeat:
/* Slow case: now with the dentry lock held */
rcu_read_unlock();
+ WARN_ON(dentry->d_flags & DCACHE_PAR_LOOKUP);
+
/* Unreachable? Get rid of it */
if (unlikely(d_unhashed(dentry)))
goto kill_it;
@@ -1743,6 +1745,7 @@ type_determined:
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
unsigned add_flags = d_flags_for_inode(inode);
+ WARN_ON(dentry->d_flags & DCACHE_PAR_LOOKUP);
spin_lock(&dentry->d_lock);
hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
@@ -2358,12 +2361,19 @@ void d_rehash(struct dentry * entry)
}
EXPORT_SYMBOL(d_rehash);
+void __d_not_in_lookup(struct dentry *dentry)
+{
+ dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
+ /* more stuff will land here */
+}
/* inode->i_lock held if inode is non-NULL */
static inline void __d_add(struct dentry *dentry, struct inode *inode)
{
spin_lock(&dentry->d_lock);
+ if (unlikely(dentry->d_flags & DCACHE_PAR_LOOKUP))
+ __d_not_in_lookup(dentry);
if (inode) {
unsigned add_flags = d_flags_for_inode(inode);
hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
@@ -2609,6 +2619,8 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
BUG_ON(d_ancestor(target, dentry));
dentry_lock_for_move(dentry, target);
+ if (unlikely(target->d_flags & DCACHE_PAR_LOOKUP))
+ __d_not_in_lookup(target);
write_seqcount_begin(&dentry->d_seq);
write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
diff --git a/fs/namei.c b/fs/namei.c
index 6fb33a7..0ee8b9d 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1634,7 +1634,11 @@ static struct dentry *lookup_slow(const struct qstr *name,
inode_unlock(inode);
return ERR_PTR(-ENOMEM);
}
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags |= DCACHE_PAR_LOOKUP;
+ spin_unlock(&dentry->d_lock);
old = inode->i_op->lookup(inode, dentry, flags);
+ d_not_in_lookup(dentry);
if (unlikely(old)) {
dput(dentry);
dentry = old;
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 7cb043d..cfc1240 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -230,6 +230,8 @@ struct dentry_operations {
#define DCACHE_ENCRYPTED_WITH_KEY 0x04000000 /* dir is encrypted with a valid key */
+#define DCACHE_PAR_LOOKUP 0x08000000 /* being looked up (with parent locked shared) */
+
extern seqlock_t rename_lock;
/*
@@ -365,6 +367,17 @@ static inline void dont_mount(struct dentry *dentry)
spin_unlock(&dentry->d_lock);
}
+extern void __d_not_in_lookup(struct dentry *);
+
+static inline void d_not_in_lookup(struct dentry *dentry)
+{
+ if (unlikely(dentry->d_flags & DCACHE_PAR_LOOKUP)) {
+ spin_lock(&dentry->d_lock);
+ __d_not_in_lookup(dentry);
+ spin_unlock(&dentry->d_lock);
+ }
+}
+
extern void dput(struct dentry *);
static inline bool d_managed(const struct dentry *dentry)
--
2.8.0.rc3