Message-Id: <20240116-flsplit-v1-16-c9d0f4370a5d@kernel.org>
Date: Tue, 16 Jan 2024 14:46:12 -0500
From: Jeff Layton <jlayton@...nel.org>
To: Christian Brauner <brauner@...nel.org>,
Alexander Viro <viro@...iv.linux.org.uk>,
Eric Van Hensbergen <ericvh@...nel.org>,
Latchesar Ionkov <lucho@...kov.net>,
Dominique Martinet <asmadeus@...ewreck.org>,
Christian Schoenebeck <linux_oss@...debyte.com>,
David Howells <dhowells@...hat.com>, Marc Dionne <marc.dionne@...istor.com>,
Xiubo Li <xiubli@...hat.com>, Ilya Dryomov <idryomov@...il.com>,
Alexander Aring <aahringo@...hat.com>, David Teigland <teigland@...hat.com>,
Miklos Szeredi <miklos@...redi.hu>,
Andreas Gruenbacher <agruenba@...hat.com>,
Trond Myklebust <trond.myklebust@...merspace.com>,
Anna Schumaker <anna@...nel.org>, Chuck Lever <chuck.lever@...cle.com>,
Neil Brown <neilb@...e.de>, Olga Kornievskaia <kolga@...app.com>,
Dai Ngo <Dai.Ngo@...cle.com>, Tom Talpey <tom@...pey.com>,
Jan Kara <jack@...e.cz>, Mark Fasheh <mark@...heh.com>,
Joel Becker <jlbec@...lplan.org>, Joseph Qi <joseph.qi@...ux.alibaba.com>,
Steve French <sfrench@...ba.org>, Paulo Alcantara <pc@...guebit.com>,
Ronnie Sahlberg <lsahlber@...hat.com>,
Shyam Prasad N <sprasad@...rosoft.com>, Namjae Jeon <linkinjeon@...nel.org>,
Sergey Senozhatsky <senozhatsky@...omium.org>,
Steven Rostedt <rostedt@...dmis.org>,
Masami Hiramatsu <mhiramat@...nel.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>
Cc: linux-kernel@...r.kernel.org, v9fs@...ts.linux.dev,
linux-afs@...ts.infradead.org, ceph-devel@...r.kernel.org,
gfs2@...ts.linux.dev, linux-fsdevel@...r.kernel.org,
linux-nfs@...r.kernel.org, ocfs2-devel@...ts.linux.dev,
linux-cifs@...r.kernel.org, samba-technical@...ts.samba.org,
linux-trace-kernel@...r.kernel.org, Jeff Layton <jlayton@...nel.org>
Subject: [PATCH 16/20] filelock: reorganize locks_delete_block and
__locks_insert_block
Rename the old __locks_delete_block to __locks_unlink_block. Change
the old locks_delete_block function to __locks_delete_block and have
it take a struct file_lock_core. Make locks_delete_block a simple
wrapper around __locks_delete_block.
Also, change __locks_insert_block to take struct file_lock_core, and
fix up its callers.
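
For reference, a condensed, purely illustrative sketch of the resulting
shape (userspace stand-ins for the kernel types so it compiles on its
own; the authoritative change is the diff below):

    #include <errno.h>
    #include <stdio.h>

    /* stand-in types: the real struct file_lock embeds a
     * struct file_lock_core as fl_core, which is all this relies on */
    struct file_lock_core { int blocked; };
    struct file_lock { struct file_lock_core fl_core; };

    /* core-based helper: in fs/locks.c this does the actual unlinking */
    static int __locks_delete_block(struct file_lock_core *waiter)
    {
            return waiter->blocked ? 0 : -ENOENT;
    }

    /* exported wrapper keeps the old file_lock-based interface */
    int locks_delete_block(struct file_lock *waiter)
    {
            return __locks_delete_block(&waiter->fl_core);
    }

    int main(void)
    {
            struct file_lock fl = { .fl_core = { .blocked = 0 } };
            printf("%d\n", locks_delete_block(&fl)); /* prints -2 (-ENOENT) */
            return 0;
    }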
Signed-off-by: Jeff Layton <jlayton@...nel.org>
---
fs/locks.c | 44 +++++++++++++++++++++++---------------------
1 file changed, 23 insertions(+), 21 deletions(-)
diff --git a/fs/locks.c b/fs/locks.c
index 3a028a8aafeb..27160dc65d63 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -682,7 +682,7 @@ static void locks_delete_global_blocked(struct file_lock_core *waiter)
*
* Must be called with blocked_lock_lock held.
*/
-static void __locks_delete_block(struct file_lock_core *waiter)
+static void __locks_unlink_block(struct file_lock_core *waiter)
{
locks_delete_global_blocked(waiter);
list_del_init(&waiter->fl_blocked_member);
@@ -698,7 +698,7 @@ static void __locks_wake_up_blocks(struct file_lock_core *blocker)
struct file_lock_core, fl_blocked_member);
fl = file_lock(waiter);
- __locks_delete_block(waiter);
+ __locks_unlink_block(waiter);
if ((IS_POSIX(waiter) || IS_FLOCK(waiter)) &&
fl->fl_lmops && fl->fl_lmops->lm_notify)
fl->fl_lmops->lm_notify(fl);
@@ -714,16 +714,9 @@ static void __locks_wake_up_blocks(struct file_lock_core *blocker)
}
}
-/**
- * locks_delete_block - stop waiting for a file lock
- * @waiter: the lock which was waiting
- *
- * lockd/nfsd need to disconnect the lock while working on it.
- */
-int locks_delete_block(struct file_lock *waiter_fl)
+static int __locks_delete_block(struct file_lock_core *waiter)
{
int status = -ENOENT;
- struct file_lock_core *waiter = &waiter_fl->fl_core;
/*
* If fl_blocker is NULL, it won't be set again as this thread "owns"
@@ -754,16 +747,27 @@ int locks_delete_block(struct file_lock *waiter_fl)
if (waiter->fl_blocker)
status = 0;
__locks_wake_up_blocks(waiter);
- __locks_delete_block(waiter);
+ __locks_unlink_block(waiter);
/*
* The setting of fl_blocker to NULL marks the "done" point in deleting
* a block. Paired with acquire at the top of this function.
*/
- smp_store_release(waiter->fl_blocker, NULL);
+ smp_store_release(&waiter->fl_blocker, NULL);
spin_unlock(&blocked_lock_lock);
return status;
}
+
+/**
+ * locks_delete_block - stop waiting for a file lock
+ * @waiter: the lock which was waiting
+ *
+ * lockd/nfsd need to disconnect the lock while working on it.
+ */
+int locks_delete_block(struct file_lock *waiter)
+{
+ return __locks_delete_block(&waiter->fl_core);
+}
EXPORT_SYMBOL(locks_delete_block);
/* Insert waiter into blocker's block list.
@@ -781,13 +785,11 @@ EXPORT_SYMBOL(locks_delete_block);
* waiters, and add beneath any waiter that blocks the new waiter.
* Thus wakeups don't happen until needed.
*/
-static void __locks_insert_block(struct file_lock *blocker_fl,
- struct file_lock *waiter_fl,
+static void __locks_insert_block(struct file_lock_core *blocker,
+ struct file_lock_core *waiter,
bool conflict(struct file_lock_core *,
struct file_lock_core *))
{
- struct file_lock_core *blocker = &blocker_fl->fl_core;
- struct file_lock_core *waiter = &waiter_fl->fl_core;
struct file_lock_core *flc;
BUG_ON(!list_empty(&waiter->fl_blocked_member));
@@ -812,8 +814,8 @@ static void __locks_insert_block(struct file_lock *blocker_fl,
}
/* Must be called with flc_lock held. */
-static void locks_insert_block(struct file_lock *blocker,
- struct file_lock *waiter,
+static void locks_insert_block(struct file_lock_core *blocker,
+ struct file_lock_core *waiter,
bool conflict(struct file_lock_core *,
struct file_lock_core *))
{
@@ -1111,7 +1113,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
if (!(request->fl_core.fl_flags & FL_SLEEP))
goto out;
error = FILE_LOCK_DEFERRED;
- locks_insert_block(fl, request, flock_locks_conflict);
+ locks_insert_block(&fl->fl_core, &request->fl_core, flock_locks_conflict);
goto out;
}
if (request->fl_core.fl_flags & FL_ACCESS)
@@ -1205,7 +1207,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
__locks_wake_up_blocks(&request->fl_core);
if (likely(!posix_locks_deadlock(request, fl))) {
error = FILE_LOCK_DEFERRED;
- __locks_insert_block(fl, request,
+ __locks_insert_block(&fl->fl_core, &request->fl_core,
posix_locks_conflict);
}
spin_unlock(&blocked_lock_lock);
@@ -1598,7 +1600,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
break_time -= jiffies;
if (break_time == 0)
break_time++;
- locks_insert_block(fl, new_fl, leases_conflict);
+ locks_insert_block(&fl->fl_core, &new_fl->fl_core, leases_conflict);
trace_break_lease_block(inode, new_fl);
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
--
2.43.0