Message-ID: <20240828182908.3735344-7-lizetao1@huawei.com>
Date: Thu, 29 Aug 2024 02:29:00 +0800
From: Li Zetao <lizetao1@...wei.com>
To: <clm@...com>, <josef@...icpanda.com>, <dsterba@...e.com>,
<terrelln@...com>, <quwenruo.btrfs@....com>, <willy@...radead.org>,
<dan.carpenter@...aro.org>
CC: <lizetao1@...wei.com>, <linux-btrfs@...r.kernel.org>,
<linux-kernel@...r.kernel.org>
Subject: [PATCH -next v2 06/14] btrfs: convert submit_eb_subpage() to take a folio
The old page API is being gradually replaced and converted to use folios
to improve code readability and avoid repeated conversions between pages
and folios. Moreover, use folio_pos() instead of page_offset(), which is
more consistent with folio usage.
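
For reference, the conversion boils down to the following pattern (a
minimal sketch for illustration only, not part of this patch):

	/* Callers that still hold a page convert at the boundary: */
	struct folio *folio = page_folio(page);

	/*
	 * folio_pos() replaces page_offset(); for the order-0 folios
	 * used in the subpage case, both return the file offset of
	 * the first byte.
	 */
	u64 start = folio_pos(folio);

	/* Locking then goes through folio->mapping, not page->mapping: */
	spin_lock(&folio->mapping->i_private_lock);
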
Signed-off-by: Li Zetao <lizetao1@...wei.com>
---
fs/btrfs/extent_io.c | 19 +++++++++----------
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index fa436a4ab46a..f40aecb96cba 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1736,12 +1736,11 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
* Return >=0 for the number of submitted extent buffers.
* Return <0 for fatal error.
*/
-static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
+static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc)
{
- struct btrfs_fs_info *fs_info = page_to_fs_info(page);
- struct folio *folio = page_folio(page);
+ struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
int submitted = 0;
- u64 page_start = page_offset(page);
+ u64 folio_start = folio_pos(folio);
int bit_start = 0;
int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
@@ -1756,21 +1755,21 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
* Take private lock to ensure the subpage won't be detached
* in the meantime.
*/
- spin_lock(&page->mapping->i_private_lock);
+ spin_lock(&folio->mapping->i_private_lock);
if (!folio_test_private(folio)) {
- spin_unlock(&page->mapping->i_private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
break;
}
spin_lock_irqsave(&subpage->lock, flags);
if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
subpage->bitmaps)) {
spin_unlock_irqrestore(&subpage->lock, flags);
- spin_unlock(&page->mapping->i_private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
bit_start++;
continue;
}
- start = page_start + bit_start * fs_info->sectorsize;
+ start = folio_start + bit_start * fs_info->sectorsize;
bit_start += sectors_per_node;
/*
@@ -1779,7 +1778,7 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
*/
eb = find_extent_buffer_nolock(fs_info, start);
spin_unlock_irqrestore(&subpage->lock, flags);
- spin_unlock(&page->mapping->i_private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
/*
* The eb has already reached 0 refs thus find_extent_buffer()
@@ -1830,7 +1829,7 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
return 0;
if (page_to_fs_info(page)->nodesize < PAGE_SIZE)
- return submit_eb_subpage(page, wbc);
+ return submit_eb_subpage(folio, wbc);
spin_lock(&mapping->i_private_lock);
if (!folio_test_private(folio)) {
--
2.34.1