Message-ID: <153421901212.24426.17984275454924451738.stgit@noble>
Date: Tue, 14 Aug 2018 13:56:52 +1000
From: NeilBrown <neilb@...e.com>
To: Jeff Layton <jlayton@...nel.org>,
Alexander Viro <viro@...iv.linux.org.uk>
Cc: "J. Bruce Fields" <bfields@...ldses.org>,
Martin Wilck <mwilck@...e.de>, linux-fsdevel@...r.kernel.org,
Frank Filz <ffilzlnx@...dspring.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH 5/5] fs/locks: create a tree of dependent requests.

When we find an existing lock which conflicts with a request,
and the request wants to wait, we currently add the request
to a list.  When the lock is removed, the whole list is woken.
This can cause the thundering-herd problem.

To reduce the problem, we make use of the (new) fact that
a pending request can itself have a list of blocked requests.
When we find a conflict, we look through the existing blocked requests.
If any one of them blocks the new request, the new request is attached
below that request; otherwise it is added to the list of blocked
requests, which are now known to be mutually non-conflicting.

This way, when the lock is released, only a set of non-conflicting
locks will be woken; the rest can stay asleep.
If the lock request cannot be granted and the request needs to be
requeued, all the other requests it blocks will then be woken.

Reported-and-tested-by: Martin Wilck <mwilck@...e.de>
Signed-off-by: NeilBrown <neilb@...e.com>
---
fs/locks.c | 29 +++++++++++++++++++++++------
1 file changed, 23 insertions(+), 6 deletions(-)
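
(Not part of the patch: a minimal userspace sketch of the insertion
rule described in the changelog.  "struct req" and "conflicts()" are
invented stand-ins for struct file_lock and the per-lock-type conflict
tests; the real logic is in the hunks below.)

/* Toy model: each request records what it waits behind (->blocker)
 * and a singly-linked list of requests waiting behind it (->blocked).
 */
#include <stdbool.h>
#include <stddef.h>

struct req {
	struct req *blocker;	/* request we sleep behind, or NULL */
	struct req *blocked;	/* head of the list waiting behind us */
	struct req *next;	/* next sibling on our blocker's list */
	long start, end;	/* byte range, for the demo conflict test */
};

static bool conflicts(const struct req *a, const struct req *b)
{
	return a->start <= b->end && b->start <= a->end; /* ranges overlap */
}

/* Descend below the first waiter that blocks the new waiter at each
 * level; otherwise join that level, which therefore stays mutually
 * non-conflicting.  This mirrors the shape of __locks_insert_block()
 * in the patch below.
 */
static void insert_block(struct req *blocker, struct req *waiter)
{
	struct req *fl;

new_blocker:
	for (fl = blocker->blocked; fl; fl = fl->next)
		if (conflicts(fl, waiter)) {
			blocker = fl;
			goto new_blocker;
		}
	waiter->blocker = blocker;
	waiter->next = blocker->blocked;
	blocker->blocked = waiter;
}
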
diff --git a/fs/locks.c b/fs/locks.c
index c7a372cebff1..af250afceff4 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -727,11 +727,25 @@ static void locks_delete_block(struct file_lock *waiter)
  * fl_blocked list itself is protected by the blocked_lock_lock, but by ensuring
  * that the flc_lock is also held on insertions we can avoid taking the
  * blocked_lock_lock in some cases when we see that the fl_blocked list is empty.
+ *
+ * Rather than just adding to the list, we check for conflicts with any existing
+ * waiters, and add beneath any waiter that blocks the new waiter.
+ * Thus wakeups don't happen until needed.
  */
 static void __locks_insert_block(struct file_lock *blocker,
-				 struct file_lock *waiter)
+				 struct file_lock *waiter,
+				 bool conflict(struct file_lock *,
+					       struct file_lock *))
 {
+	struct file_lock *fl;
 	BUG_ON(!list_empty(&waiter->fl_block));
+
+new_blocker:
+	list_for_each_entry(fl, &blocker->fl_blocked, fl_block)
+		if (conflict(fl, waiter)) {
+			blocker = fl;
+			goto new_blocker;
+		}
 	waiter->fl_blocker = blocker;
 	list_add_tail(&waiter->fl_block, &blocker->fl_blocked);
 	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
@@ -746,10 +760,12 @@ static void __locks_insert_block(struct file_lock *blocker,
 }
 
 /* Must be called with flc_lock held. */
 static void locks_insert_block(struct file_lock *blocker,
-			       struct file_lock *waiter)
+			       struct file_lock *waiter,
+			       bool conflict(struct file_lock *,
+					     struct file_lock *))
 {
 	spin_lock(&blocked_lock_lock);
-	__locks_insert_block(blocker, waiter);
+	__locks_insert_block(blocker, waiter, conflict);
 	spin_unlock(&blocked_lock_lock);
 }
@@ -1008,7 +1024,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
 		if (!(request->fl_flags & FL_SLEEP))
 			goto out;
 		error = FILE_LOCK_DEFERRED;
-		locks_insert_block(fl, request);
+		locks_insert_block(fl, request, flock_locks_conflict);
 		goto out;
 	}
 	if (request->fl_flags & FL_ACCESS)
@@ -1082,7 +1098,8 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
 		spin_lock(&blocked_lock_lock);
 		if (likely(!posix_locks_deadlock(request, fl))) {
 			error = FILE_LOCK_DEFERRED;
-			__locks_insert_block(fl, request);
+			__locks_insert_block(fl, request,
+					     posix_locks_conflict);
 		}
 		spin_unlock(&blocked_lock_lock);
 		goto out;
@@ -1555,7 +1572,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
 		break_time -= jiffies;
 	if (break_time == 0)
 		break_time++;
-	locks_insert_block(fl, new_fl);
+	locks_insert_block(fl, new_fl, leases_conflict);
 	trace_break_lease_block(inode, new_fl);
 	spin_unlock(&ctx->flc_lock);
 	percpu_up_read_preempt_enable(&file_rwsem);
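
(Not part of the patch: continuing the toy model from above, the
payoff on release is that only the top-level waiters, which are
mutually non-conflicting by construction, need waking; each keeps its
own subtree of sleepers.)

/* Wake only the blocker's direct children.  Each woken waiter keeps
 * its own ->blocked subtree, so deeper sleepers are not disturbed
 * until their new top-level blocker is resolved.
 */
static void wake_non_conflicting(struct req *blocker)
{
	struct req *fl, *next;

	for (fl = blocker->blocked; fl; fl = next) {
		next = fl->next;
		fl->blocker = NULL;
		fl->next = NULL;
		/* the kernel would wake the waiting task here */
	}
	blocker->blocked = NULL;
}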