[<prev] [next>] [day] [month] [year] [list]
Message-ID: <20260205094934.10500-1-sun.jian.kdev@gmail.com>
Date: Thu, 5 Feb 2026 17:49:34 +0800
From: Sun Jian <sun.jian.kdev@...il.com>
To: Konstantin Komarov <almaz.alexandrovich@...agon-software.com>
Cc: ntfs3@...ts.linux.dev,
linux-kernel@...r.kernel.org,
Sun Jian <sun.jian.kdev@...il.com>
Subject: [PATCH] fs/ntfs3: return folios from ntfs_lock_new_page()
ntfs_lock_new_page() currently returns a struct page * but it primarily
operates on folios via __filemap_get_folio(). Convert it to return a
struct folio * and use folio_alloc() + __folio_set_locked() for the
temporary folio used to avoid data corruption during decompression.
When the cached folio is not uptodate, keep the existing behavior by
calling folio_file_page() and converting the returned page back to a
folio. Defensively handle error pointers from folio_file_page().
ni_readpage_cmpr() is updated to consume the folio return value while
keeping the existing page array and cleanup logic unchanged.
Signed-off-by: Sun Jian <sun.jian.kdev@...il.com>
---
fs/ntfs3/frecord.c | 33 +++++++++++++++++++--------------
1 file changed, 19 insertions(+), 14 deletions(-)
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 641ddaf8d4a0..a23aaf34f001 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -2022,27 +2022,31 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
return err;
}
-static struct page *ntfs_lock_new_page(struct address_space *mapping,
- pgoff_t index, gfp_t gfp)
+static struct folio *ntfs_lock_new_page(struct address_space *mapping,
+ pgoff_t index, gfp_t gfp)
{
struct folio *folio = __filemap_get_folio(mapping, index,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
- struct page *page;
if (IS_ERR(folio))
- return ERR_CAST(folio);
+ return folio;
- if (!folio_test_uptodate(folio))
- return folio_file_page(folio, index);
+ if (!folio_test_uptodate(folio)) {
+ struct page *page = folio_file_page(folio, index);
+
+ if (IS_ERR(page))
+ return ERR_CAST(page);
+ return page_folio(page);
+ }
/* Use a temporary page to avoid data corruption */
folio_unlock(folio);
folio_put(folio);
- page = alloc_page(gfp);
- if (!page)
+ folio = folio_alloc(gfp, 0);
+ if (!folio)
return ERR_PTR(-ENOMEM);
- __SetPageLocked(page);
- return page;
+ __folio_set_locked(folio);
+ return folio;
}
/*
@@ -2064,6 +2068,7 @@ int ni_readpage_cmpr(struct ntfs_inode *ni, struct folio *folio)
u32 i, idx, frame_size, pages_per_frame;
gfp_t gfp_mask;
struct page *pg;
+ struct folio *f;
if (vbo >= i_size_read(&ni->vfs_inode)) {
folio_zero_range(folio, 0, folio_size(folio));
@@ -2099,12 +2104,12 @@ int ni_readpage_cmpr(struct ntfs_inode *ni, struct folio *folio)
if (i == idx)
continue;
- pg = ntfs_lock_new_page(mapping, index, gfp_mask);
- if (IS_ERR(pg)) {
- err = PTR_ERR(pg);
+ f = ntfs_lock_new_page(mapping, index, gfp_mask);
+ if (IS_ERR(f)) {
+ err = PTR_ERR(f);
goto out1;
}
- pages[i] = pg;
+ pages[i] = &f->page;
}
err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame, 0);
--
2.43.0
Powered by blists - more mailing lists