Message-ID: <20181108202659.GD6090@fieldses.org>
Date: Thu, 8 Nov 2018 15:26:59 -0500
From: "J. Bruce Fields" <bfields@...ldses.org>
To: NeilBrown <neilb@...e.com>
Cc: Jeff Layton <jlayton@...nel.org>,
Alexander Viro <viro@...iv.linux.org.uk>,
Martin Wilck <mwilck@...e.de>, linux-fsdevel@...r.kernel.org,
Frank Filz <ffilzlnx@...dspring.com>,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 01/12] fs/locks: rename some lists and pointers.
On Mon, Nov 05, 2018 at 12:30:47PM +1100, NeilBrown wrote:
> struct file_lock contains an 'fl_next' pointer which
> is used to point to the lock that this request is blocked
> waiting for. So rename it to fl_blocker.
>
> The fl_block list_head in an active lock is the head of a list of
> blocked requests. In a request it is a node in that list.
> These are two distinct uses, so replace with two list_heads
> with different names.
> fl_blocked is the head of a list of blocked requests
> fl_block is a node on that list.
Reading these, I have a lot of trouble keeping fl_blocked, fl_block, and
fl_blocker straight. Is it just me?
I guess they form a tree, so fl_children, fl_siblings, and fl_parent
might be easier for me to keep straight.
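To spell that out, here is the mapping I have in mind (a sketch only; the
tree-style names are hypothetical, and the comments name the fields this
patch actually introduces):

	struct file_lock {
		struct file_lock *fl_parent;   /* fl_blocker: the lock this request is waiting for */
		struct list_head fl_children;  /* fl_blocked: head of the list of requests blocked on this lock */
		struct list_head fl_siblings;  /* fl_block: this request's node on fl_blocker->fl_blocked */
		/* ... remaining fields as before ... */
	};

With those names, "walk fl_children and wake each waiter" reads more
naturally to me than "walk fl_blocked and remove each fl_block".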
--b.
>
> The two different list_heads are never used at the same time, but that
> will change in a future patch.
>
> Note that a tracepoint is changed to report fl_blocker instead
> of fl_next.
>
> Signed-off-by: NeilBrown <neilb@...e.com>
> ---
> fs/cifs/file.c | 2 +-
> fs/locks.c | 38 ++++++++++++++++++++------------------
> include/linux/fs.h | 7 +++++--
> include/trace/events/filelock.h | 16 ++++++++--------
> 4 files changed, 34 insertions(+), 29 deletions(-)
>
> diff --git a/fs/cifs/file.c b/fs/cifs/file.c
> index 74c33d5fafc8..d7ed895e05d1 100644
> --- a/fs/cifs/file.c
> +++ b/fs/cifs/file.c
> @@ -1103,7 +1103,7 @@ cifs_posix_lock_set(struct file *file, struct file_lock *flock)
> rc = posix_lock_file(file, flock, NULL);
> up_write(&cinode->lock_sem);
> if (rc == FILE_LOCK_DEFERRED) {
> - rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
> + rc = wait_event_interruptible(flock->fl_wait, !flock->fl_blocker);
> if (!rc)
> goto try_again;
> posix_unblock_lock(flock);
> diff --git a/fs/locks.c b/fs/locks.c
> index 2ecb4db8c840..a6c6d601286c 100644
> --- a/fs/locks.c
> +++ b/fs/locks.c
> @@ -189,7 +189,7 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
> * This lock protects the blocked_hash. Generally, if you're accessing it, you
> * want to be holding this lock.
> *
> - * In addition, it also protects the fl->fl_block list, and the fl->fl_next
> + * In addition, it also protects the fl->fl_blocked list, and the fl->fl_blocker
> * pointer for file_lock structures that are acting as lock requests (in
> * contrast to those that are acting as records of acquired locks).
> *
> @@ -293,6 +293,7 @@ static void locks_init_lock_heads(struct file_lock *fl)
> {
> INIT_HLIST_NODE(&fl->fl_link);
> INIT_LIST_HEAD(&fl->fl_list);
> + INIT_LIST_HEAD(&fl->fl_blocked);
> INIT_LIST_HEAD(&fl->fl_block);
> init_waitqueue_head(&fl->fl_wait);
> }
> @@ -332,6 +333,7 @@ void locks_free_lock(struct file_lock *fl)
> {
> BUG_ON(waitqueue_active(&fl->fl_wait));
> BUG_ON(!list_empty(&fl->fl_list));
> + BUG_ON(!list_empty(&fl->fl_blocked));
> BUG_ON(!list_empty(&fl->fl_block));
> BUG_ON(!hlist_unhashed(&fl->fl_link));
>
> @@ -667,7 +669,7 @@ static void __locks_delete_block(struct file_lock *waiter)
> {
> locks_delete_global_blocked(waiter);
> list_del_init(&waiter->fl_block);
> - waiter->fl_next = NULL;
> + waiter->fl_blocker = NULL;
> }
>
> static void locks_delete_block(struct file_lock *waiter)
> @@ -683,16 +685,16 @@ static void locks_delete_block(struct file_lock *waiter)
> * it seems like the reasonable thing to do.
> *
> * Must be called with both the flc_lock and blocked_lock_lock held. The
> - * fl_block list itself is protected by the blocked_lock_lock, but by ensuring
> + * fl_blocked list itself is protected by the blocked_lock_lock, but by ensuring
> * that the flc_lock is also held on insertions we can avoid taking the
> - * blocked_lock_lock in some cases when we see that the fl_block list is empty.
> + * blocked_lock_lock in some cases when we see that the fl_blocked list is empty.
> */
> static void __locks_insert_block(struct file_lock *blocker,
> struct file_lock *waiter)
> {
> BUG_ON(!list_empty(&waiter->fl_block));
> - waiter->fl_next = blocker;
> - list_add_tail(&waiter->fl_block, &blocker->fl_block);
> + waiter->fl_blocker = blocker;
> + list_add_tail(&waiter->fl_block, &blocker->fl_blocked);
> if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
> locks_insert_global_blocked(waiter);
> }
> @@ -716,18 +718,18 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
> /*
> * Avoid taking global lock if list is empty. This is safe since new
> * blocked requests are only added to the list under the flc_lock, and
> - * the flc_lock is always held here. Note that removal from the fl_block
> + * the flc_lock is always held here. Note that removal from the fl_blocked
> * list does not require the flc_lock, so we must recheck list_empty()
> * after acquiring the blocked_lock_lock.
> */
> - if (list_empty(&blocker->fl_block))
> + if (list_empty(&blocker->fl_blocked))
> return;
>
> spin_lock(&blocked_lock_lock);
> - while (!list_empty(&blocker->fl_block)) {
> + while (!list_empty(&blocker->fl_blocked)) {
> struct file_lock *waiter;
>
> - waiter = list_first_entry(&blocker->fl_block,
> + waiter = list_first_entry(&blocker->fl_blocked,
> struct file_lock, fl_block);
> __locks_delete_block(waiter);
> if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
> @@ -878,7 +880,7 @@ static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
>
> hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
> if (posix_same_owner(fl, block_fl))
> - return fl->fl_next;
> + return fl->fl_blocker;
> }
> return NULL;
> }
> @@ -1237,7 +1239,7 @@ static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
> error = posix_lock_inode(inode, fl, NULL);
> if (error != FILE_LOCK_DEFERRED)
> break;
> - error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
> + error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
> if (!error)
> continue;
>
> @@ -1324,7 +1326,7 @@ int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
> error = posix_lock_inode(inode, &fl, NULL);
> if (error != FILE_LOCK_DEFERRED)
> break;
> - error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
> + error = wait_event_interruptible(fl.fl_wait, !fl.fl_blocker);
> if (!error) {
> /*
> * If we've been sleeping someone might have
> @@ -1518,7 +1520,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
>
> locks_dispose_list(&dispose);
> error = wait_event_interruptible_timeout(new_fl->fl_wait,
> - !new_fl->fl_next, break_time);
> + !new_fl->fl_blocker, break_time);
>
> percpu_down_read_preempt_disable(&file_rwsem);
> spin_lock(&ctx->flc_lock);
> @@ -1931,7 +1933,7 @@ static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
> error = flock_lock_inode(inode, fl);
> if (error != FILE_LOCK_DEFERRED)
> break;
> - error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
> + error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
> if (!error)
> continue;
>
> @@ -2210,7 +2212,7 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd,
> error = vfs_lock_file(filp, cmd, fl, NULL);
> if (error != FILE_LOCK_DEFERRED)
> break;
> - error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
> + error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
> if (!error)
> continue;
>
> @@ -2581,7 +2583,7 @@ posix_unblock_lock(struct file_lock *waiter)
> int status = 0;
>
> spin_lock(&blocked_lock_lock);
> - if (waiter->fl_next)
> + if (waiter->fl_blocker)
> __locks_delete_block(waiter);
> else
> status = -ENOENT;
> @@ -2707,7 +2709,7 @@ static int locks_show(struct seq_file *f, void *v)
>
> lock_get_status(f, fl, iter->li_pos, "");
>
> - list_for_each_entry(bfl, &fl->fl_block, fl_block)
> + list_for_each_entry(bfl, &fl->fl_blocked, fl_block)
> lock_get_status(f, bfl, iter->li_pos, " ->");
>
> return 0;
> diff --git a/include/linux/fs.h b/include/linux/fs.h
> index c95c0807471f..a161bcdca8a2 100644
> --- a/include/linux/fs.h
> +++ b/include/linux/fs.h
> @@ -1044,10 +1044,13 @@ bool opens_in_grace(struct net *);
> * Obviously, the last two criteria only matter for POSIX locks.
> */
> struct file_lock {
> - struct file_lock *fl_next; /* singly linked list for this inode */
> + struct file_lock *fl_blocker; /* The lock, that is blocking us */
> struct list_head fl_list; /* link into file_lock_context */
> struct hlist_node fl_link; /* node in global lists */
> - struct list_head fl_block; /* circular list of blocked processes */
> + struct list_head fl_blocked; /* list of requests with
> + * ->fl_blocker pointing here */
> + struct list_head fl_block; /* node in
> + * ->fl_blocker->fl_blocked */
> fl_owner_t fl_owner;
> unsigned int fl_flags;
> unsigned char fl_type;
> diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
> index 68b17c116907..fad7befa612d 100644
> --- a/include/trace/events/filelock.h
> +++ b/include/trace/events/filelock.h
> @@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(filelock_lock,
> __field(struct file_lock *, fl)
> __field(unsigned long, i_ino)
> __field(dev_t, s_dev)
> - __field(struct file_lock *, fl_next)
> + __field(struct file_lock *, fl_blocker)
> __field(fl_owner_t, fl_owner)
> __field(unsigned int, fl_pid)
> __field(unsigned int, fl_flags)
> @@ -82,7 +82,7 @@ DECLARE_EVENT_CLASS(filelock_lock,
> __entry->fl = fl ? fl : NULL;
> __entry->s_dev = inode->i_sb->s_dev;
> __entry->i_ino = inode->i_ino;
> - __entry->fl_next = fl ? fl->fl_next : NULL;
> + __entry->fl_blocker = fl ? fl->fl_blocker : NULL;
> __entry->fl_owner = fl ? fl->fl_owner : NULL;
> __entry->fl_pid = fl ? fl->fl_pid : 0;
> __entry->fl_flags = fl ? fl->fl_flags : 0;
> @@ -92,9 +92,9 @@ DECLARE_EVENT_CLASS(filelock_lock,
> __entry->ret = ret;
> ),
>
> - TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d",
> + TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_blocker=0x%p fl_owner=0x%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d",
> __entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
> - __entry->i_ino, __entry->fl_next, __entry->fl_owner,
> + __entry->i_ino, __entry->fl_blocker, __entry->fl_owner,
> __entry->fl_pid, show_fl_flags(__entry->fl_flags),
> show_fl_type(__entry->fl_type),
> __entry->fl_start, __entry->fl_end, __entry->ret)
> @@ -125,7 +125,7 @@ DECLARE_EVENT_CLASS(filelock_lease,
> __field(struct file_lock *, fl)
> __field(unsigned long, i_ino)
> __field(dev_t, s_dev)
> - __field(struct file_lock *, fl_next)
> + __field(struct file_lock *, fl_blocker)
> __field(fl_owner_t, fl_owner)
> __field(unsigned int, fl_flags)
> __field(unsigned char, fl_type)
> @@ -137,7 +137,7 @@ DECLARE_EVENT_CLASS(filelock_lease,
> __entry->fl = fl ? fl : NULL;
> __entry->s_dev = inode->i_sb->s_dev;
> __entry->i_ino = inode->i_ino;
> - __entry->fl_next = fl ? fl->fl_next : NULL;
> + __entry->fl_blocker = fl ? fl->fl_blocker : NULL;
> __entry->fl_owner = fl ? fl->fl_owner : NULL;
> __entry->fl_flags = fl ? fl->fl_flags : 0;
> __entry->fl_type = fl ? fl->fl_type : 0;
> @@ -145,9 +145,9 @@ DECLARE_EVENT_CLASS(filelock_lease,
> __entry->fl_downgrade_time = fl ? fl->fl_downgrade_time : 0;
> ),
>
> - TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
> + TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_blocker=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
> __entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
> - __entry->i_ino, __entry->fl_next, __entry->fl_owner,
> + __entry->i_ino, __entry->fl_blocker, __entry->fl_owner,
> show_fl_flags(__entry->fl_flags),
> show_fl_type(__entry->fl_type),
> __entry->fl_break_time, __entry->fl_downgrade_time)
>