[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250825-tassen-saugt-7fb4b5909be1@brauner>
Date: Mon, 25 Aug 2025 13:46:25 +0200
From: Christian Brauner <brauner@...nel.org>
To: Josef Bacik <josef@...icpanda.com>
Cc: linux-fsdevel@...r.kernel.org, linux-btrfs@...r.kernel.org,
kernel-team@...com, linux-ext4@...r.kernel.org, linux-xfs@...r.kernel.org,
viro@...iv.linux.org.uk
Subject: Re: [PATCH 35/50] fs: remove I_WILL_FREE|I_FREEING from
fs-writeback.c
On Thu, Aug 21, 2025 at 04:18:46PM -0400, Josef Bacik wrote:
> Now that we have the reference count to indicate live inodes, we can
> remove all of the uses of I_WILL_FREE and I_FREEING in fs-writeback.c
> and use the appropriate reference count checks.
>
> Signed-off-by: Josef Bacik <josef@...icpanda.com>
> ---
Very nice change and it will be even better without the plain accesses
to i_count. ;)
> fs/fs-writeback.c | 47 ++++++++++++++++++++++++++---------------------
> 1 file changed, 26 insertions(+), 21 deletions(-)
>
> diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
> index 789c4228412c..0ea6d0e86791 100644
> --- a/fs/fs-writeback.c
> +++ b/fs/fs-writeback.c
> @@ -129,7 +129,7 @@ static bool inode_io_list_move_locked(struct inode *inode,
> {
> assert_spin_locked(&wb->list_lock);
> assert_spin_locked(&inode->i_lock);
> - WARN_ON_ONCE(inode->i_state & I_FREEING);
> + WARN_ON_ONCE(!refcount_read(&inode->i_count));
>
> if (list_empty(&inode->i_io_list))
> iobj_get(inode);
> @@ -314,7 +314,7 @@ static void inode_cgwb_move_to_attached(struct inode *inode,
> {
> assert_spin_locked(&wb->list_lock);
> assert_spin_locked(&inode->i_lock);
> - WARN_ON_ONCE(inode->i_state & I_FREEING);
> + WARN_ON_ONCE(!refcount_read(&inode->i_count));
>
> inode->i_state &= ~I_SYNC_QUEUED;
> if (wb != &wb->bdi->wb)
> @@ -415,10 +415,10 @@ static bool inode_do_switch_wbs(struct inode *inode,
> xa_lock_irq(&mapping->i_pages);
>
> /*
> - * Once I_FREEING or I_WILL_FREE are visible under i_lock, the eviction
> - * path owns the inode and we shouldn't modify ->i_io_list.
> + * Once the refcount is 0, the eviction path owns the inode and we
> + * shouldn't modify ->i_io_list.
> */
> - if (unlikely(inode->i_state & (I_FREEING | I_WILL_FREE)))
> + if (unlikely(!refcount_read(&inode->i_count)))
> goto skip_switch;
>
> trace_inode_switch_wbs(inode, old_wb, new_wb);
> @@ -570,13 +570,16 @@ static bool inode_prepare_wbs_switch(struct inode *inode,
> /* while holding I_WB_SWITCH, no one else can update the association */
> spin_lock(&inode->i_lock);
> if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
> - inode->i_state & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) ||
> + inode->i_state & I_WB_SWITCH ||
> inode_to_wb(inode) == new_wb) {
> spin_unlock(&inode->i_lock);
> return false;
> }
> + if (!inode_tryget(inode)) {
> + spin_unlock(&inode->i_lock);
> + return false;
> + }
> inode->i_state |= I_WB_SWITCH;
> - __iget(inode);
> spin_unlock(&inode->i_lock);
>
> return true;
> @@ -1207,7 +1210,7 @@ static void inode_cgwb_move_to_attached(struct inode *inode,
> {
> assert_spin_locked(&wb->list_lock);
> assert_spin_locked(&inode->i_lock);
> - WARN_ON_ONCE(inode->i_state & I_FREEING);
> + WARN_ON_ONCE(!refcount_read(&inode->i_count));
>
> inode->i_state &= ~I_SYNC_QUEUED;
> inode_delete_from_io_list(inode);
> @@ -1405,7 +1408,7 @@ static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
> * tracking. Flush worker will ignore this inode anyway and it will
> * trigger assertions in inode_io_list_move_locked().
> */
> - if (inode->i_state & I_FREEING) {
> + if (!refcount_read(&inode->i_count)) {
> inode_delete_from_io_list(inode);
> wb_io_lists_depopulated(wb);
> return;
> @@ -1621,7 +1624,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
> struct writeback_control *wbc,
> unsigned long dirtied_before)
> {
> - if (inode->i_state & I_FREEING)
> + if (!refcount_read(&inode->i_count))
> return;
>
> /*
> @@ -1787,7 +1790,7 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
> * whether it is a data-integrity sync (%WB_SYNC_ALL) or not (%WB_SYNC_NONE).
> *
> * To prevent the inode from going away, either the caller must have a reference
> - * to the inode, or the inode must have I_WILL_FREE or I_FREEING set.
> + * to the inode, or the inode must have a zero refcount.
> */
> static int writeback_single_inode(struct inode *inode,
> struct writeback_control *wbc)
> @@ -1797,9 +1800,7 @@ static int writeback_single_inode(struct inode *inode,
>
> spin_lock(&inode->i_lock);
> if (!refcount_read(&inode->i_count))
> - WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
> - else
> - WARN_ON(inode->i_state & I_WILL_FREE);
> + WARN_ON(inode->i_state & I_NEW);
>
> if (inode->i_state & I_SYNC) {
> /*
> @@ -1837,7 +1838,7 @@ static int writeback_single_inode(struct inode *inode,
> 	 * If the inode is freeing, its i_io_list shouldn't be updated
> * as it can be finally deleted at this moment.
> */
> - if (!(inode->i_state & I_FREEING)) {
> + if (refcount_read(&inode->i_count)) {
> /*
> * If the inode is now fully clean, then it can be safely
> * removed from its writeback list (if any). Otherwise the
> @@ -1957,7 +1958,7 @@ static long writeback_sb_inodes(struct super_block *sb,
> * kind writeout is handled by the freer.
> */
> spin_lock(&inode->i_lock);
> - if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
> + if ((inode->i_state & I_NEW) || !refcount_read(&inode->i_count)) {
> redirty_tail_locked(inode, wb);
> spin_unlock(&inode->i_lock);
> continue;
> @@ -2615,7 +2616,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
> if (inode_unhashed(inode))
> goto out_unlock;
> }
> - if (inode->i_state & I_FREEING)
> + if (!refcount_read(&inode->i_count))
> goto out_unlock;
>
> /*
> @@ -2729,13 +2730,17 @@ static void wait_sb_inodes(struct super_block *sb)
> spin_unlock_irq(&sb->s_inode_wblist_lock);
>
> spin_lock(&inode->i_lock);
> - if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
> + if (inode->i_state & I_NEW) {
> + spin_unlock(&inode->i_lock);
> + spin_lock_irq(&sb->s_inode_wblist_lock);
> + continue;
> + }
> +
> + if (!inode_tryget(inode)) {
> spin_unlock(&inode->i_lock);
> -
> spin_lock_irq(&sb->s_inode_wblist_lock);
> continue;
> }
> - __iget(inode);
>
> /*
> * We could have potentially ended up on the cached LRU list, so
> @@ -2886,7 +2891,7 @@ EXPORT_SYMBOL(sync_inodes_sb);
> * This function commits an inode to disk immediately if it is dirty. This is
> * primarily needed by knfsd.
> *
> - * The caller must either have a ref on the inode or must have set I_WILL_FREE.
> + * The caller must either hold a reference on the inode or the inode must have a zero refcount.
> */
> int write_inode_now(struct inode *inode, int sync)
> {
> --
> 2.49.0
>
Powered by blists - more mailing lists