Message-ID: <20240812165444.GG6043@frogsfrogsfrogs>
Date: Mon, 12 Aug 2024 09:54:44 -0700
From: "Darrick J. Wong" <djwong@...nel.org>
To: Zhang Yi <yi.zhang@...weicloud.com>
Cc: linux-xfs@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org, hch@...radead.org, brauner@...nel.org,
david@...morbit.com, jack@...e.cz, willy@...radead.org,
yi.zhang@...wei.com, chengzhihao1@...wei.com, yukuai3@...wei.com
Subject: Re: [PATCH v2 6/6] iomap: reduce unnecessary state_lock when setting
ifs uptodate and dirty bits
On Mon, Aug 12, 2024 at 08:11:59PM +0800, Zhang Yi wrote:
> From: Zhang Yi <yi.zhang@...wei.com>
>
> When doing a buffered write, we set the uptodate and dirty bits of the
> written range separately, which takes the ifs->state_lock twice when
> blocksize < folio size; the second acquisition is redundant. Now that
> large folios are supported, that spinlock matters more for performance,
> so merging the two updates reduces unnecessary locking overhead and
> yields some performance gain.
>
> Suggested-by: Dave Chinner <david@...morbit.com>
> Signed-off-by: Zhang Yi <yi.zhang@...wei.com>
Seems reasonable to me.
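
(For anyone reading along: the dirty bits live in the second half of
ifs->state, right after the uptodate bits, which is why a single pass
under state_lock can cover both ranges and why the second bitmap_set()
in the patch indexes at first_blk + blks_per_folio. Below is a rough
userspace sketch of that layout only, not kernel code; BLKS_PER_FOLIO
and set_range() are made-up stand-ins for i_blocks_per_folio() and
bitmap_set().)

	#include <stdio.h>
	#include <stdbool.h>

	#define BLKS_PER_FOLIO 16	/* assume e.g. 4k blocks in a 64k folio */

	/* bits [0, N) track uptodate, bits [N, 2N) track dirty */
	static bool state[2 * BLKS_PER_FOLIO];

	static void set_range(bool *bits, unsigned int start, unsigned int nr)
	{
		unsigned int i;

		for (i = start; i < start + nr; i++)
			bits[i] = true;
	}

	int main(void)
	{
		unsigned int first_blk = 3, nr_blks = 5;
		unsigned int i;

		/* one "locked" section covering both updates, as in the patch */
		set_range(state, first_blk, nr_blks);			/* uptodate bits */
		set_range(state, first_blk + BLKS_PER_FOLIO, nr_blks);	/* dirty bits */

		for (i = 0; i < BLKS_PER_FOLIO; i++)
			printf("blk %2u: uptodate=%d dirty=%d\n", i,
			       state[i], state[i + BLKS_PER_FOLIO]);
		return 0;
	}
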
Reviewed-by: Darrick J. Wong <djwong@...nel.org>
--D
> ---
> fs/iomap/buffered-io.c | 38 +++++++++++++++++++++++++++++++++++---
> 1 file changed, 35 insertions(+), 3 deletions(-)
>
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index 96600405dbb5..67d7c1c22c98 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -182,6 +182,37 @@ static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
> ifs_set_range_dirty(folio, ifs, off, len);
> }
>
> +static void ifs_set_range_dirty_uptodate(struct folio *folio,
> + struct iomap_folio_state *ifs, size_t off, size_t len)
> +{
> + struct inode *inode = folio->mapping->host;
> + unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
> + unsigned int first_blk = (off >> inode->i_blkbits);
> + unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
> + unsigned int nr_blks = last_blk - first_blk + 1;
> + unsigned long flags;
> +
> + spin_lock_irqsave(&ifs->state_lock, flags);
> + bitmap_set(ifs->state, first_blk, nr_blks);
> + if (ifs_is_fully_uptodate(folio, ifs))
> + folio_mark_uptodate(folio);
> + bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
> + spin_unlock_irqrestore(&ifs->state_lock, flags);
> +}
> +
> +static void iomap_set_range_dirty_uptodate(struct folio *folio,
> + size_t off, size_t len)
> +{
> + struct iomap_folio_state *ifs = folio->private;
> +
> + if (ifs)
> + ifs_set_range_dirty_uptodate(folio, ifs, off, len);
> + else
> + folio_mark_uptodate(folio);
> +
> + filemap_dirty_folio(folio->mapping, folio);
> +}
> +
> static struct iomap_folio_state *ifs_alloc(struct inode *inode,
> struct folio *folio, unsigned int flags)
> {
> @@ -851,6 +882,8 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
> static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
> size_t copied, struct folio *folio)
> {
> + size_t from = offset_in_folio(folio, pos);
> +
> flush_dcache_folio(folio);
>
> /*
> @@ -866,9 +899,8 @@ static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
> */
> if (unlikely(copied < len && !folio_test_uptodate(folio)))
> return false;
> - iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
> - iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
> - filemap_dirty_folio(inode->i_mapping, folio);
> +
> + iomap_set_range_dirty_uptodate(folio, from, copied);
> return true;
> }
>
> --
> 2.39.2
>
>