Message-ID: <20211117043127.GK24307@magnolia>
Date: Tue, 16 Nov 2021 20:31:27 -0800
From: "Darrick J. Wong" <djwong@...nel.org>
To: "Matthew Wilcox (Oracle)" <willy@...radead.org>
Cc: linux-xfs@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-block@...r.kernel.org,
Jens Axboe <axboe@...nel.dk>,
Christoph Hellwig <hch@...radead.org>,
Christoph Hellwig <hch@....de>
Subject: Re: [PATCH v2 20/28] iomap: Convert iomap_write_begin() and
iomap_write_end() to folios
On Mon, Nov 08, 2021 at 04:05:43AM +0000, Matthew Wilcox (Oracle) wrote:
> These functions still only work in PAGE_SIZE chunks, but there are
> fewer conversions from tail to head pages as a result of this patch.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
> Reviewed-by: Christoph Hellwig <hch@....de>
> ---
> fs/iomap/buffered-io.c | 66 ++++++++++++++++++++----------------------
> 1 file changed, 31 insertions(+), 35 deletions(-)
>
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index 9c61d12028ca..f4ae200adc4c 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
<snip>
> @@ -741,6 +737,7 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
>  	long status = 0;
>
>  	do {
> +		struct folio *folio;
>  		struct page *page;
>  		unsigned long offset;	/* Offset into pagecache page */
>  		unsigned long bytes;	/* Bytes to write to page */
> @@ -764,16 +761,17 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
>  			break;
>  		}
>
> -		status = iomap_write_begin(iter, pos, bytes, &page);
> +		status = iomap_write_begin(iter, pos, bytes, &folio);
>  		if (unlikely(status))
>  			break;
>
> +		page = folio_file_page(folio, pos >> PAGE_SHIFT);
>  		if (mapping_writably_mapped(iter->inode->i_mapping))
>  			flush_dcache_page(page);
>
>  		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
Hrmm. In principle (or I guess even in a subsequent patch), if we had
multi-page folios, could we simply loop over the pages in the folio
instead of copying a single page and then calling back into
iomap_write_begin to get (probably) the same folio?
This looks like a fairly straightforward conversion, but I was wondering
about that one little point...
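Something like this completely untested sketch is what I have in mind,
reusing the helpers from this series; the min_t types are just
illustrative, and I'm hand-waving the length clamp, the fault-in /
balance_dirty_pages dance, and the short-copy retry logic:

	/*
	 * Illustrative only: copy as much of the iov as fits in this
	 * folio, one sub-page at a time, so we'd only call
	 * iomap_write_begin()/iomap_write_end() once per folio instead
	 * of once per page.
	 */
	offset = offset_in_folio(folio, pos);
	bytes = min_t(u64, folio_size(folio) - offset, iov_iter_count(i));
	copied = 0;

	while (copied < bytes) {
		struct page *page = folio_file_page(folio,
						(pos + copied) >> PAGE_SHIFT);
		size_t poff = offset_in_page(offset + copied);
		size_t plen = min_t(size_t, PAGE_SIZE - poff, bytes - copied);
		size_t n;

		if (mapping_writably_mapped(iter->inode->i_mapping))
			flush_dcache_page(page);

		n = copy_page_from_iter_atomic(page, poff, plen, i);
		copied += n;
		if (n < plen)
			break;	/* short copy; bail out and let caller retry */
	}

	status = iomap_write_end(iter, pos, bytes, copied, folio);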
Reviewed-by: Darrick J. Wong <djwong@...nel.org>
--D
>
> -		status = iomap_write_end(iter, pos, bytes, copied, page);
> +		status = iomap_write_end(iter, pos, bytes, copied, folio);
>
>  		if (unlikely(copied != status))
>  			iov_iter_revert(i, copied - status);
> @@ -839,13 +837,13 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
>  	do {
>  		unsigned long offset = offset_in_page(pos);
>  		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
> -		struct page *page;
> +		struct folio *folio;
>
> -		status = iomap_write_begin(iter, pos, bytes, &page);
> +		status = iomap_write_begin(iter, pos, bytes, &folio);
>  		if (unlikely(status))
>  			return status;
>
> -		status = iomap_write_end(iter, pos, bytes, bytes, page);
> +		status = iomap_write_end(iter, pos, bytes, bytes, folio);
>  		if (WARN_ON_ONCE(status == 0))
>  			return -EIO;
>
> @@ -882,21 +880,19 @@ EXPORT_SYMBOL_GPL(iomap_file_unshare);
>  static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
>  {
>  	struct folio *folio;
> -	struct page *page;
>  	int status;
>  	size_t offset, bytes;
>
> -	status = iomap_write_begin(iter, pos, length, &page);
> +	status = iomap_write_begin(iter, pos, length, &folio);
>  	if (status)
>  		return status;
> -	folio = page_folio(page);
>
>  	offset = offset_in_folio(folio, pos);
>  	bytes = min_t(u64, folio_size(folio) - offset, length);
>  	folio_zero_range(folio, offset, bytes);
>  	folio_mark_accessed(folio);
>
> -	return iomap_write_end(iter, pos, bytes, bytes, page);
> +	return iomap_write_end(iter, pos, bytes, bytes, folio);
>  }
>
>  static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
> --
> 2.33.0
>