Message-ID: <wwx6x46p7gkrunyh6arukpy3fhh7jzgyy4f64khvgfmqa7husc@5d7sqtpuqwgx>
Date: Thu, 30 Oct 2025 14:57:18 +0100
From: Jan Kara <jack@...e.cz>
To: Mateusz Guzik <mjguzik@...il.com>
Cc: brauner@...nel.org, viro@...iv.linux.org.uk, jack@...e.cz,
linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org
Subject: Re: [PATCH v2 2/2] fs: cosmetic fixes to lru handling
On Wed 29-10-25 14:14:28, Mateusz Guzik wrote:
> 1. inode_bit_waitqueue() was somehow placed between __inode_add_lru() and
>    inode_add_lru(); move it up.
> 2. Assert that ->i_lock is held in __inode_add_lru() instead of merely
>    claiming in a comment that it is needed.
> 3. s/__inode_add_lru/__inode_lru_list_add/ for consistency with
>    inode_lru_list_del() and with similar routines for sb and io list
>    management.
> 4. Push the list presence check into inode_lru_list_del(), just like the
>    sb and io list helpers do.
>
> Signed-off-by: Mateusz Guzik <mjguzik@...il.com>
Looks good. Feel free to add:
Reviewed-by: Jan Kara <jack@...e.cz>
Honza
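
(For anyone following along without the tree at hand, below is a minimal
standalone userspace sketch of the convention the patch ends up with. It is
purely illustrative: toy_inode, toy_lru and the list helpers are made-up
stand-ins, a plain assert() on a bool replaces lockdep_assert_held(), and the
I_* state and refcount checks are omitted. The point it shows is the same as
items 2 and 4 above: the internal add helper asserts its locking rule instead
of documenting it, and the del helper tolerates an inode that is not on the
LRU, so callers such as iput_final() no longer need their own list_empty()
check.)

/*
 * Minimal userspace sketch of the pattern in this patch; not the kernel
 * code itself.  Toy types and names (toy_inode, toy_lru) are made up,
 * and a plain assert() on a bool stands in for lockdep_assert_held().
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct list_node {
	struct list_node *next, *prev;
};

static void list_init(struct list_node *n) { n->next = n->prev = n; }
static bool list_empty(const struct list_node *n) { return n->next == n; }

static void list_add(struct list_node *n, struct list_node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_del_init(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

struct toy_inode {
	bool i_lock_held;		/* stands in for holding inode->i_lock */
	struct list_node i_lru;
};

static struct list_node toy_lru = { &toy_lru, &toy_lru };

/* Internal helper: assert the locking rule instead of documenting it. */
static void __toy_inode_lru_list_add(struct toy_inode *inode)
{
	assert(inode->i_lock_held);	/* lockdep_assert_held() analogue */
	if (list_empty(&inode->i_lru))
		list_add(&inode->i_lru, &toy_lru);
}

/* The "is it on the list at all?" check now lives in the del helper. */
static void toy_inode_lru_list_del(struct toy_inode *inode)
{
	if (list_empty(&inode->i_lru))
		return;
	list_del_init(&inode->i_lru);
}

int main(void)
{
	struct toy_inode inode = { .i_lock_held = false };

	list_init(&inode.i_lru);
	inode.i_lock_held = true;		/* "take" i_lock */

	__toy_inode_lru_list_add(&inode);
	/* Callers may now call the del helper unconditionally... */
	toy_inode_lru_list_del(&inode);
	/* ...even if the inode is no longer (or never was) on the LRU. */
	toy_inode_lru_list_del(&inode);

	inode.i_lock_held = false;		/* "drop" i_lock */
	printf("LRU empty after del: %d\n", list_empty(&toy_lru));
	return 0;
}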
> ---
>
> rebased
>
> fs/fs-writeback.c | 2 +-
> fs/inode.c | 50 ++++++++++++++++++++++++----------------------
> include/linux/fs.h | 2 +-
> mm/filemap.c | 4 ++--
> mm/truncate.c | 6 +++---
> mm/vmscan.c | 2 +-
> mm/workingset.c | 2 +-
> 7 files changed, 35 insertions(+), 33 deletions(-)
>
> diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
> index 5dccbe5fb09d..c81fffcb3648 100644
> --- a/fs/fs-writeback.c
> +++ b/fs/fs-writeback.c
> @@ -1455,7 +1455,7 @@ static void inode_sync_complete(struct inode *inode)
>
> inode_state_clear(inode, I_SYNC);
> /* If inode is clean an unused, put it into LRU now... */
> - inode_add_lru(inode);
> + inode_lru_list_add(inode);
> /* Called with inode->i_lock which ensures memory ordering. */
> inode_wake_up_bit(inode, __I_SYNC);
> }
> diff --git a/fs/inode.c b/fs/inode.c
> index b5c2efebaa18..faf99d916afc 100644
> --- a/fs/inode.c
> +++ b/fs/inode.c
> @@ -530,23 +530,6 @@ void ihold(struct inode *inode)
> }
> EXPORT_SYMBOL(ihold);
>
> -static void __inode_add_lru(struct inode *inode, bool rotate)
> -{
> - if (inode_state_read(inode) & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
> - return;
> - if (icount_read(inode))
> - return;
> - if (!(inode->i_sb->s_flags & SB_ACTIVE))
> - return;
> - if (!mapping_shrinkable(&inode->i_data))
> - return;
> -
> - if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
> - this_cpu_inc(nr_unused);
> - else if (rotate)
> - inode_state_set(inode, I_REFERENCED);
> -}
> -
> struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
> struct inode *inode, u32 bit)
> {
> @@ -584,18 +567,38 @@ void wait_on_new_inode(struct inode *inode)
> }
> EXPORT_SYMBOL(wait_on_new_inode);
>
> +static void __inode_lru_list_add(struct inode *inode, bool rotate)
> +{
> + lockdep_assert_held(&inode->i_lock);
> +
> + if (inode_state_read(inode) & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
> + return;
> + if (icount_read(inode))
> + return;
> + if (!(inode->i_sb->s_flags & SB_ACTIVE))
> + return;
> + if (!mapping_shrinkable(&inode->i_data))
> + return;
> +
> + if (list_lru_add_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
> + this_cpu_inc(nr_unused);
> + else if (rotate)
> + inode_state_set(inode, I_REFERENCED);
> +}
> +
> /*
> * Add inode to LRU if needed (inode is unused and clean).
> - *
> - * Needs inode->i_lock held.
> */
> -void inode_add_lru(struct inode *inode)
> +void inode_lru_list_add(struct inode *inode)
> {
> - __inode_add_lru(inode, false);
> + __inode_lru_list_add(inode, false);
> }
>
> static void inode_lru_list_del(struct inode *inode)
> {
> + if (list_empty(&inode->i_lru))
> + return;
> +
> if (list_lru_del_obj(&inode->i_sb->s_inode_lru, &inode->i_lru))
> this_cpu_dec(nr_unused);
> }
> @@ -1924,7 +1927,7 @@ static void iput_final(struct inode *inode)
> if (!drop &&
> !(inode_state_read(inode) & I_DONTCACHE) &&
> (sb->s_flags & SB_ACTIVE)) {
> - __inode_add_lru(inode, true);
> + __inode_lru_list_add(inode, true);
> spin_unlock(&inode->i_lock);
> return;
> }
> @@ -1948,8 +1951,7 @@ static void iput_final(struct inode *inode)
> inode_state_replace(inode, I_WILL_FREE, I_FREEING);
> }
>
> - if (!list_empty(&inode->i_lru))
> - inode_lru_list_del(inode);
> + inode_lru_list_del(inode);
> spin_unlock(&inode->i_lock);
>
> evict(inode);
> diff --git a/include/linux/fs.h b/include/linux/fs.h
> index a813abdcf218..33129cda3a99 100644
> --- a/include/linux/fs.h
> +++ b/include/linux/fs.h
> @@ -3502,7 +3502,7 @@ static inline void remove_inode_hash(struct inode *inode)
> }
>
> extern void inode_sb_list_add(struct inode *inode);
> -extern void inode_add_lru(struct inode *inode);
> +extern void inode_lru_list_add(struct inode *inode);
>
> extern int sb_set_blocksize(struct super_block *, int);
> extern int sb_min_blocksize(struct super_block *, int);
> diff --git a/mm/filemap.c b/mm/filemap.c
> index 13f0259d993c..add5228a7d97 100644
> --- a/mm/filemap.c
> +++ b/mm/filemap.c
> @@ -256,7 +256,7 @@ void filemap_remove_folio(struct folio *folio)
> __filemap_remove_folio(folio, NULL);
> xa_unlock_irq(&mapping->i_pages);
> if (mapping_shrinkable(mapping))
> - inode_add_lru(mapping->host);
> + inode_lru_list_add(mapping->host);
> spin_unlock(&mapping->host->i_lock);
>
> filemap_free_folio(mapping, folio);
> @@ -335,7 +335,7 @@ void delete_from_page_cache_batch(struct address_space *mapping,
> page_cache_delete_batch(mapping, fbatch);
> xa_unlock_irq(&mapping->i_pages);
> if (mapping_shrinkable(mapping))
> - inode_add_lru(mapping->host);
> + inode_lru_list_add(mapping->host);
> spin_unlock(&mapping->host->i_lock);
>
> for (i = 0; i < folio_batch_count(fbatch); i++)
> diff --git a/mm/truncate.c b/mm/truncate.c
> index 91eb92a5ce4f..ad9c0fa29d94 100644
> --- a/mm/truncate.c
> +++ b/mm/truncate.c
> @@ -46,7 +46,7 @@ static void clear_shadow_entries(struct address_space *mapping,
>
> xas_unlock_irq(&xas);
> if (mapping_shrinkable(mapping))
> - inode_add_lru(mapping->host);
> + inode_lru_list_add(mapping->host);
> spin_unlock(&mapping->host->i_lock);
> }
>
> @@ -111,7 +111,7 @@ static void truncate_folio_batch_exceptionals(struct address_space *mapping,
>
> xas_unlock_irq(&xas);
> if (mapping_shrinkable(mapping))
> - inode_add_lru(mapping->host);
> + inode_lru_list_add(mapping->host);
> spin_unlock(&mapping->host->i_lock);
> out:
> folio_batch_remove_exceptionals(fbatch);
> @@ -622,7 +622,7 @@ int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
> __filemap_remove_folio(folio, NULL);
> xa_unlock_irq(&mapping->i_pages);
> if (mapping_shrinkable(mapping))
> - inode_add_lru(mapping->host);
> + inode_lru_list_add(mapping->host);
> spin_unlock(&mapping->host->i_lock);
>
> filemap_free_folio(mapping, folio);
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index b2fc8b626d3d..bb4a96c7b682 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -811,7 +811,7 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
> __filemap_remove_folio(folio, shadow);
> xa_unlock_irq(&mapping->i_pages);
> if (mapping_shrinkable(mapping))
> - inode_add_lru(mapping->host);
> + inode_lru_list_add(mapping->host);
> spin_unlock(&mapping->host->i_lock);
>
> if (free_folio)
> diff --git a/mm/workingset.c b/mm/workingset.c
> index 68a76a91111f..d32dc2e02a61 100644
> --- a/mm/workingset.c
> +++ b/mm/workingset.c
> @@ -755,7 +755,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
> xa_unlock_irq(&mapping->i_pages);
> if (mapping->host != NULL) {
> if (mapping_shrinkable(mapping))
> - inode_add_lru(mapping->host);
> + inode_lru_list_add(mapping->host);
> spin_unlock(&mapping->host->i_lock);
> }
> ret = LRU_REMOVED_RETRY;
> --
> 2.34.1
>
--
Jan Kara <jack@...e.com>
SUSE Labs, CR