[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260108105519.994856-2-wangjinchao600@gmail.com>
Date: Thu, 8 Jan 2026 18:55:10 +0800
From: Jinchao Wang <wangjinchao600@...il.com>
To: syzbot+2d9c96466c978346b55f@...kaller.appspotmail.com
Cc: linux-kernel@...r.kernel.org,
Jinchao Wang <wangjinchao600@...il.com>
Subject: [PATCH] hugetlbfs: take folio lock non-blocking in hole-punch partial-page zeroing
#syz test

hugetlbfs_punch_hole() calls hugetlbfs_zero_partial_page() while holding
i_mmap_lock_write(), and that helper in turn blocks on the folio lock via
filemap_lock_hugetlb_folio(). Presumably this can deadlock against a path
that holds the folio lock and waits on i_mmap_rwsem (the syzbot report
should confirm the exact lock order). Switch the hugetlb folio lookup to a
new filemap_lock_folio_nowait() (FGP_LOCK | FGP_NOWAIT) so the lock attempt
fails with -EAGAIN instead of sleeping, and make hugetlbfs_punch_hole()
drop i_mmap_rwsem and retry from the top on -EAGAIN.

NOTE(review): the -EAGAIN retry loops back unconditionally with no backoff,
and non-EAGAIN errors from hugetlbfs_zero_partial_page() are still ignored
by the caller — verify both are acceptable before merging.
---
fs/hugetlbfs/inode.c | 33 ++++++++++++++++++++++-----------
include/linux/hugetlb.h | 2 +-
include/linux/pagemap.h | 6 ++++++
3 files changed, 29 insertions(+), 12 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 9c94ed8c3ab0..a8f33b72a8a6 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -643,17 +643,16 @@ static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
remove_inode_hugepages(inode, offset, LLONG_MAX);
}
-static void hugetlbfs_zero_partial_page(struct hstate *h,
- struct address_space *mapping,
- loff_t start,
- loff_t end)
+static int hugetlbfs_zero_partial_page(struct hstate *h,
+ struct address_space *mapping,
+ loff_t start, loff_t end)
{
pgoff_t idx = start >> huge_page_shift(h);
struct folio *folio;
folio = filemap_lock_hugetlb_folio(h, mapping, idx);
if (IS_ERR(folio))
- return;
+ return PTR_ERR(folio);
start = start & ~huge_page_mask(h);
end = end & ~huge_page_mask(h);
@@ -673,6 +672,7 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
struct hstate *h = hstate_inode(inode);
loff_t hpage_size = huge_page_size(h);
loff_t hole_start, hole_end;
+ int rc;
/*
* hole_start and hole_end indicate the full pages within the hole.
@@ -688,12 +688,18 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
return -EPERM;
}
+repeat:
i_mmap_lock_write(mapping);
/* If range starts before first full page, zero partial page. */
- if (offset < hole_start)
- hugetlbfs_zero_partial_page(h, mapping,
- offset, min(offset + len, hole_start));
+ if (offset < hole_start) {
+ rc = hugetlbfs_zero_partial_page(h, mapping, offset,
+ min(offset + len, hole_start));
+ if (rc == -EAGAIN) {
+ i_mmap_unlock_write(mapping);
+ goto repeat;
+ }
+ }
/* Unmap users of full pages in the hole. */
if (hole_end > hole_start) {
@@ -704,9 +710,14 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
}
/* If range extends beyond last full page, zero partial page. */
- if ((offset + len) > hole_end && (offset + len) > hole_start)
- hugetlbfs_zero_partial_page(h, mapping,
- hole_end, offset + len);
+ if ((offset + len) > hole_end && (offset + len) > hole_start) {
+ rc = hugetlbfs_zero_partial_page(h, mapping, hole_end,
+ offset + len);
+ if (rc == -EAGAIN) {
+ i_mmap_unlock_write(mapping);
+ goto repeat;
+ }
+ }
i_mmap_unlock_write(mapping);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 8e63e46b8e1f..e7d99183361a 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -811,7 +811,7 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h)
static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
struct address_space *mapping, pgoff_t idx)
{
- return filemap_lock_folio(mapping, idx << huge_page_order(h));
+ return filemap_lock_folio_nowait(mapping, idx << huge_page_order(h));
}
#include <asm/hugetlb.h>
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 09b581c1d878..94f31035fe1d 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -820,6 +820,12 @@ static inline struct folio *filemap_lock_folio(struct address_space *mapping,
return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
}
+static inline struct folio *filemap_lock_folio_nowait(struct address_space *mapping,
+ pgoff_t index)
+{
+ return __filemap_get_folio(mapping, index, FGP_LOCK | FGP_NOWAIT, 0);
+}
+
/**
* filemap_grab_folio - grab a folio from the page cache
* @mapping: The address space to search
--
2.43.0
Powered by blists - more mailing lists