Message-Id: <527dea9d7e32fe6b94c7fe00df2c126203017911.1738918357.git.baolin.wang@linux.alibaba.com>
Date: Fri, 7 Feb 2025 17:44:21 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: akpm@...ux-foundation.org,
	hughd@...gle.com
Cc: david@...hat.com,
	baolin.wang@...ux.alibaba.com,
	linux-mm@...ck.org,
	linux-kernel@...r.kernel.org
Subject: [PATCH 5/6] mm: shmem: factor out the within_size logic into a new helper

Factor out the duplicated within_size logic in
shmem_huge_global_enabled() and shmem_allowable_huge_orders() into a
new helper, shmem_get_orders_within_size(). No functional changes
intended.

Suggested-by: David Hildenbrand <david@...hat.com>
Signed-off-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
---
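(Note, not part of the patch: for reviewers skimming the new helper,
highest_order() and next_order() walk the set bits of an order bitmask
from highest to lowest, and shmem_get_orders_within_size() returns the
whole remaining mask at the first order whose aligned range still fits
below i_size. Below is a rough, self-contained userspace sketch of that
walk, using simplified stand-ins for the kernel helpers of the same
name; it assumes GCC/Clang's __builtin_clzl and a 64-bit unsigned long.)

#include <stdio.h>

#define BIT(n)	(1UL << (n))

/* Simplified stand-ins for the kernel helpers of the same name:
 * they walk the set bits of an order mask from highest to lowest. */
static int highest_order(unsigned long orders)
{
	return 63 - __builtin_clzl(orders);	/* assumes 64-bit long */
}

static int next_order(unsigned long *orders, int prev)
{
	*orders &= ~BIT(prev);			/* clear the bit just tried */
	return *orders ? highest_order(*orders) : 0;
}

int main(void)
{
	/* Pretend mTHP orders 9, 4 and 2 are within_size candidates. */
	unsigned long within_size_orders = BIT(9) | BIT(4) | BIT(2);
	int order = highest_order(within_size_orders);

	while (within_size_orders) {
		printf("trying order %d (mask now %#lx)\n",
		       order, within_size_orders);
		order = next_order(&within_size_orders, order);
	}
	return 0;
}

The helper cuts this walk short and returns the remaining mask as soon
as one aligned range fits within i_size.
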
 mm/shmem.c | 53 +++++++++++++++++++++++++++--------------------------
 1 file changed, 27 insertions(+), 26 deletions(-)

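(Also not part of the patch: shmem_allowable_huge_orders() now calls
the helper with write_end == 0. Since loff_t i_size is never negative,
max(write_end, i_size_read(inode)) then degenerates to
i_size_read(inode), which is exactly what the removed loop computed, so
behaviour is unchanged. A toy check of that identity, where my_max and
the loff_t typedef are hypothetical userspace stand-ins for the kernel
definitions:)

#include <assert.h>

typedef long long loff_t;			/* stand-in for the kernel typedef */

#define my_max(a, b)	((a) > (b) ? (a) : (b))	/* stand-in for kernel max() */

int main(void)
{
	loff_t write_end = 0;
	loff_t i_size = 4096;	/* any non-negative file size */

	/* With write_end == 0, the helper's max() is a no-op. */
	assert(my_max(write_end, i_size) == i_size);
	return 0;
}
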
diff --git a/mm/shmem.c b/mm/shmem.c
index c243d814f2b0..671f63063fd4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -590,6 +590,28 @@ shmem_mapping_size_orders(struct address_space *mapping, pgoff_t index, loff_t w
 	return order > 0 ? BIT(order + 1) - 1 : 0;
 }
 
+static unsigned int shmem_get_orders_within_size(struct inode *inode,
+		unsigned long within_size_orders, pgoff_t index,
+		loff_t write_end)
+{
+	pgoff_t aligned_index;
+	unsigned long order;
+	loff_t i_size;
+
+	order = highest_order(within_size_orders);
+	while (within_size_orders) {
+		aligned_index = round_up(index + 1, 1 << order);
+		i_size = max(write_end, i_size_read(inode));
+		i_size = round_up(i_size, PAGE_SIZE);
+		if (i_size >> PAGE_SHIFT >= aligned_index)
+			return within_size_orders;
+
+		order = next_order(&within_size_orders, order);
+	}
+
+	return 0;
+}
+
 static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
 					      loff_t write_end, bool shmem_huge_force,
 					      struct vm_area_struct *vma,
@@ -598,9 +620,6 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
 	unsigned int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ?
 				       0 : BIT(HPAGE_PMD_ORDER);
 	unsigned long within_size_orders;
-	unsigned int order;
-	pgoff_t aligned_index;
-	loff_t i_size;
 
 	if (!S_ISREG(inode->i_mode))
 		return 0;
@@ -634,16 +653,11 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
 		within_size_orders = shmem_mapping_size_orders(inode->i_mapping,
 							       index, write_end);
 
-		order = highest_order(within_size_orders);
-		while (within_size_orders) {
-			aligned_index = round_up(index + 1, 1 << order);
-			i_size = max(write_end, i_size_read(inode));
-			i_size = round_up(i_size, PAGE_SIZE);
-			if (i_size >> PAGE_SHIFT >= aligned_index)
-				return within_size_orders;
+		within_size_orders = shmem_get_orders_within_size(inode, within_size_orders,
+								  index, write_end);
+		if (within_size_orders > 0)
+			return within_size_orders;
 
-			order = next_order(&within_size_orders, order);
-		}
 		fallthrough;
 	case SHMEM_HUGE_ADVISE:
 		if (vm_flags & VM_HUGEPAGE)
@@ -1756,10 +1770,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
 	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
 	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
 	unsigned long vm_flags = vma ? vma->vm_flags : 0;
-	pgoff_t aligned_index;
 	unsigned int global_orders;
-	loff_t i_size;
-	int order;
 
 	if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags)))
 		return 0;
@@ -1785,17 +1796,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
 		return READ_ONCE(huge_shmem_orders_inherit);
 
 	/* Allow mTHP that will be fully within i_size. */
-	order = highest_order(within_size_orders);
-	while (within_size_orders) {
-		aligned_index = round_up(index + 1, 1 << order);
-		i_size = round_up(i_size_read(inode), PAGE_SIZE);
-		if (i_size >> PAGE_SHIFT >= aligned_index) {
-			mask |= within_size_orders;
-			break;
-		}
-
-		order = next_order(&within_size_orders, order);
-	}
+	mask |= shmem_get_orders_within_size(inode, within_size_orders, index, 0);
 
 	if (vm_flags & VM_HUGEPAGE)
 		mask |= READ_ONCE(huge_shmem_orders_madvise);
--
2.39.3