Message-ID: <153421901205.24426.3008401633617707303.stgit@noble>
Date: Tue, 14 Aug 2018 13:56:52 +1000
From: NeilBrown <neilb@...e.com>
To: Jeff Layton <jlayton@...nel.org>,
Alexander Viro <viro@...iv.linux.org.uk>
Cc: "J. Bruce Fields" <bfields@...ldses.org>,
Martin Wilck <mwilck@...e.de>, linux-fsdevel@...r.kernel.org,
Frank Filz <ffilzlnx@...dspring.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH 3/5] fs/locks: allow a lock request to block other requests.

Currently, a lock can block pending requests, but all pending
requests are treated equally.  If lots of pending requests are
mutually exclusive, this means they will all be woken up when the
lock is released, and all but one will fail.  This thundering herd
can hurt performance.

So we will allow pending requests to block other requests.  Only
the first request will be woken, and it will be responsible for
waking the others (a userspace sketch of the resulting structure
follows the list below).

This patch doesn't implement this fully, but prepares the way.
- It acknowledges that a request might be blocking other requests,
  and when the request is converted to a lock, those blocked
  requests are moved across.
- When a request is requeued or discarded, all blocked requests
  are woken.
- When deadlock-detection looks for the lock which blocks a given
  request, we follow the chain of ->fl_blocker all the way to the
  top.
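
To make the intended structure concrete, here is a minimal,
self-contained userspace sketch (not kernel code: struct req,
insert_block() and wake_up_blocks() are hypothetical stand-ins for
struct file_lock, __locks_insert_block() and __locks_wake_up_blocks(),
a plain fl_next pointer replaces the kernel's list_head, and all
locking is omitted):

#include <stdio.h>

struct req {
	const char *name;
	struct req *fl_blocker;	/* request/lock this one waits on */
	struct req *fl_blocked;	/* first request waiting on this one */
	struct req *fl_next;	/* next sibling on the blocker's list */
};

/* Queue 'waiter' behind 'blocker' (cf. __locks_insert_block()). */
static void insert_block(struct req *blocker, struct req *waiter)
{
	waiter->fl_blocker = blocker;
	waiter->fl_next = blocker->fl_blocked;
	blocker->fl_blocked = waiter;
}

/* Wake everything queued (transitively) behind 'blocker', splicing
 * each waiter's own waiters back onto blocker's list first
 * (cf. __locks_wake_up_blocks()).
 */
static void wake_up_blocks(struct req *blocker)
{
	while (blocker->fl_blocked) {
		struct req *waiter = blocker->fl_blocked;

		blocker->fl_blocked = waiter->fl_next;
		while (waiter->fl_blocked) {
			struct req *w = waiter->fl_blocked;

			waiter->fl_blocked = w->fl_next;
			w->fl_blocker = blocker;
			w->fl_next = blocker->fl_blocked;
			blocker->fl_blocked = w;
		}
		waiter->fl_blocker = NULL;
		waiter->fl_next = NULL;
		printf("woke %s\n", waiter->name);
	}
}

int main(void)
{
	struct req lock = { "lock", NULL, NULL, NULL };
	struct req a = { "A", NULL, NULL, NULL };
	struct req b = { "B", NULL, NULL, NULL };
	struct req c = { "C", NULL, NULL, NULL };

	insert_block(&lock, &a);	/* A waits on the held lock */
	insert_block(&a, &b);		/* B queues behind A */
	insert_block(&b, &c);		/* C queues behind B */
	wake_up_blocks(&lock);		/* wakes A, then B, then C */
	return 0;
}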
Signed-off-by: NeilBrown <neilb@...e.com>
---
fs/locks.c | 43 +++++++++++++++++++++++++++++++++++--------
1 file changed, 35 insertions(+), 8 deletions(-)
diff --git a/fs/locks.c b/fs/locks.c
index de0b9276f23d..9439eebd4eb9 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -408,9 +408,19 @@ void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
 			fl->fl_ops->fl_copy_lock(new, fl);
 	}
 }
-
 EXPORT_SYMBOL(locks_copy_lock);
 
+static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
+{
+	struct file_lock *f;
+
+	spin_lock(&blocked_lock_lock);
+	list_splice_init(&fl->fl_blocked, &new->fl_blocked);
+	list_for_each_entry(f, &new->fl_blocked, fl_block)
+		f->fl_blocker = new;
+	spin_unlock(&blocked_lock_lock);
+}
+
 static inline int flock_translate_cmd(int cmd) {
 	if (cmd & LOCK_MAND)
 		return cmd & (LOCK_MAND | LOCK_RW);
@@ -683,11 +693,15 @@ static void __locks_delete_block(struct file_lock *waiter)
 
 static void __locks_wake_up_blocks(struct file_lock *blocker)
 {
+	/* Wake up all requests in blocker->fl_blocked, including
+	 * requests blocked by those requests.
+	 */
 	while (!list_empty(&blocker->fl_blocked)) {
 		struct file_lock *waiter;
 
 		waiter = list_first_entry(&blocker->fl_blocked,
					  struct file_lock, fl_block);
+		list_splice_init(&waiter->fl_blocked, &blocker->fl_blocked);
 		__locks_delete_block(waiter);
 		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
 			waiter->fl_lmops->lm_notify(waiter);
@@ -699,6 +713,7 @@ static void __locks_wake_up_blocks(struct file_lock *blocker)
 static void locks_delete_block(struct file_lock *waiter)
 {
 	spin_lock(&blocked_lock_lock);
+	__locks_wake_up_blocks(waiter);
 	__locks_delete_block(waiter);
 	spin_unlock(&blocked_lock_lock);
 }
@@ -714,13 +729,19 @@ static void locks_delete_block(struct file_lock *waiter)
  * blocked_lock_lock in some cases when we see that the fl_blocked list is empty.
  */
 static void __locks_insert_block(struct file_lock *blocker,
-					struct file_lock *waiter)
+				 struct file_lock *waiter)
 {
 	BUG_ON(!list_empty(&waiter->fl_block));
 	waiter->fl_blocker = blocker;
 	list_add_tail(&waiter->fl_block, &blocker->fl_blocked);
 	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
 		locks_insert_global_blocked(waiter);
+
+	/* The requests in waiter->fl_blocked are known to conflict with
+	 * waiter, but might not conflict with blocker, or the requests
+	 * and lock which block it.  So they all need to be woken.
+	 */
+	__locks_wake_up_blocks(waiter);
 }
 
 /* Must be called with flc_lock held. */
@@ -893,8 +914,11 @@ static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
 	struct file_lock *fl;
 
 	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
-		if (posix_same_owner(fl, block_fl))
-			return fl->fl_blocker;
+		if (posix_same_owner(fl, block_fl)) {
+			while (fl->fl_blocker)
+				fl = fl->fl_blocker;
+			return fl;
+		}
 	}
 	return NULL;
 }
@@ -987,6 +1011,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
 	if (request->fl_flags & FL_ACCESS)
 		goto out;
 	locks_copy_lock(new_fl, request);
+	locks_move_blocks(new_fl, request);
 	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
 	new_fl = NULL;
 	error = 0;
@@ -1179,6 +1204,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
 			goto out;
 		}
 		locks_copy_lock(new_fl, request);
+		locks_move_blocks(new_fl, request);
 		locks_insert_lock_ctx(new_fl, &fl->fl_list);
 		fl = new_fl;
 		new_fl = NULL;
@@ -2587,13 +2613,14 @@ void locks_remove_file(struct file *filp)
 int
 posix_unblock_lock(struct file_lock *waiter)
 {
-	int status = 0;
+	int status = -ENOENT;
 
 	spin_lock(&blocked_lock_lock);
-	if (waiter->fl_blocker)
+	if (waiter->fl_blocker) {
+		__locks_wake_up_blocks(waiter);
 		__locks_delete_block(waiter);
-	else
-		status = -ENOENT;
+		status = 0;
+	}
 	spin_unlock(&blocked_lock_lock);
 	return status;
 }
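
And, mirroring the deadlock-detection hunk above, a compact sketch
of the new chain-following in what_owner_is_waiting_for() (same
caveats: struct req and top_blocker() are hypothetical userspace
stand-ins, not the kernel API):

#include <stdio.h>

struct req {
	const char *name;
	struct req *fl_blocker;	/* what this request waits on, if anything */
};

/* A waiter found in blocked_hash may itself be queued behind another
 * pending request, so chase fl_blocker to the request/lock at the
 * top of the chain, as the new while-loop above does.
 */
static struct req *top_blocker(struct req *fl)
{
	while (fl->fl_blocker)
		fl = fl->fl_blocker;
	return fl;
}

int main(void)
{
	struct req lock = { "held lock", NULL };
	struct req a = { "request A", &lock };
	struct req b = { "request B", &a };

	/* B is queued behind A, but for deadlock detection what
	 * matters is the lock at the top of the chain. */
	printf("B ultimately waits on: %s\n", top_blocker(&b)->name);
	return 0;
}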