Message-ID: <3d70cc1e-6508-417c-87a5-8fe6de10f5ac@linux.alibaba.com>
Date: Mon, 27 Oct 2025 14:28:45 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: Nico Pache <npache@...hat.com>, linux-kernel@...r.kernel.org,
linux-trace-kernel@...r.kernel.org, linux-mm@...ck.org,
linux-doc@...r.kernel.org
Cc: david@...hat.com, ziy@...dia.com, lorenzo.stoakes@...cle.com,
Liam.Howlett@...cle.com, ryan.roberts@....com, dev.jain@....com,
corbet@....net, rostedt@...dmis.org, mhiramat@...nel.org,
mathieu.desnoyers@...icios.com, akpm@...ux-foundation.org,
baohua@...nel.org, willy@...radead.org, peterx@...hat.com,
wangkefeng.wang@...wei.com, usamaarif642@...il.com, sunnanyong@...wei.com,
vishal.moola@...il.com, thomas.hellstrom@...ux.intel.com,
yang@...amperecomputing.com, kas@...nel.org, aarcange@...hat.com,
raquini@...hat.com, anshuman.khandual@....com, catalin.marinas@....com,
tiwai@...e.de, will@...nel.org, dave.hansen@...ux.intel.com, jack@...e.cz,
cl@...two.org, jglisse@...gle.com, surenb@...gle.com, zokeefe@...gle.com,
hannes@...xchg.org, rientjes@...gle.com, mhocko@...e.com,
rdunlap@...radead.org, hughd@...gle.com, richard.weiyang@...il.com,
lance.yang@...ux.dev, vbabka@...e.cz, rppt@...nel.org, jannh@...gle.com,
pfalcato@...e.de
Subject: Re: [PATCH v12 mm-new 12/15] khugepaged: Introduce mTHP collapse
support
On 2025/10/23 02:37, Nico Pache wrote:
> During PMD range scanning, track occupied pages in a bitmap. If mTHPs are
> enabled, we remove the restriction of max_ptes_none during the scan phase
> to avoid missing potential mTHP candidates.
>
> Implement collapse_scan_bitmap() to perform binary recursion on the bitmap
> and determine the best eligible order for the collapse. A stack struct is
> used instead of traditional recursion. The algorithm splits the bitmap
> into smaller chunks to find the best-fit mTHP. max_ptes_none is scaled by
> the attempted collapse order to determine how "full" an order must be
> before being considered for collapse.
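
For readers without the earlier patches of the series handy, here is a
minimal sketch of what the scaling could look like. The helper name below
is illustrative, and the shift-based formula is an assumption on my part
rather than something taken from this hunk (collapse_max_ptes_none() is
introduced earlier in the series):

/*
 * Hypothetical sketch only: assumed scaling is a right shift of the
 * PMD-level khugepaged_max_ptes_none tunable down to the attempted order.
 */
static unsigned int sketch_scaled_max_ptes_none(unsigned int order)
{
        return khugepaged_max_ptes_none >> (HPAGE_PMD_ORDER - order);
}

With this, an order-N region of (1 << N) PTEs is eligible once its number
of present PTEs exceeds (1 << N) - scaled_max_ptes_none - 1, which matches
the threshold_bits calculation in the hunk below.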
>
> Once we determine which mTHP size fits best in that PMD range, a collapse
> is attempted. A minimum collapse order of 2 is used, as this is the lowest
> order supported by anon memory.
>
> mTHP collapses reject regions containing swapped-out or shared pages.
> This is because adding new entries can lead to new none pages, and these
> may lead to constant promotion into a higher-order (m)THP. A similar
> issue can occur with "max_ptes_none > HPAGE_PMD_NR/2": such a collapse
> at least doubles the number of present pages, so a future scan will
> satisfy the promotion condition once again. This issue is prevented via
> the collapse_allowable_orders() function.
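
A quick worked example of the creep problem (using the shift-based scaling
sketched above, so the exact numbers are illustrative): take HPAGE_PMD_NR =
512 and max_ptes_none = 288 (> 256). At order 5 (32 PTEs) the scaled limit
is 288 >> 4 = 18, so 14 present PTEs are enough to collapse into a fully
present 32-PTE mTHP. At order 6 (64 PTEs) the scaled limit is 288 >> 3 =
36, so 28 present PTEs suffice; the 32 pages just created already qualify.
Each collapse doubles the present count, so the region creeps all the way
up to a PMD-sized THP.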
>
> Currently madv_collapse is not supported and will only attempt PMD
> collapse.
>
> We can also remove the check for is_khugepaged inside the PMD scan as
> the collapse_max_ptes_none() function handles this logic now.
>
> Signed-off-by: Nico Pache <npache@...hat.com>
I've tested this patch, and it works as expected. (Some nits are listed
below.)
Reviewed-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
Tested-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
> ---
> include/linux/khugepaged.h | 2 +
> mm/khugepaged.c | 128 ++++++++++++++++++++++++++++++++++---
> 2 files changed, 122 insertions(+), 8 deletions(-)
>
> diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
> index eb1946a70cff..179ce716e769 100644
> --- a/include/linux/khugepaged.h
> +++ b/include/linux/khugepaged.h
> @@ -1,6 +1,8 @@
> /* SPDX-License-Identifier: GPL-2.0 */
> #ifndef _LINUX_KHUGEPAGED_H
> #define _LINUX_KHUGEPAGED_H
> +#define KHUGEPAGED_MIN_MTHP_ORDER 2
> +#define MAX_MTHP_BITMAP_STACK (1UL << (ilog2(MAX_PTRS_PER_PTE) - KHUGEPAGED_MIN_MTHP_ORDER))
>
> #include <linux/mm.h>
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 89a105124790..e2319bfd0065 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -93,6 +93,11 @@ static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
>
> static struct kmem_cache *mm_slot_cache __ro_after_init;
>
> +struct scan_bit_state {
> + u8 order;
> + u16 offset;
> +};
> +
> struct collapse_control {
> bool is_khugepaged;
>
> @@ -101,6 +106,13 @@ struct collapse_control {
>
> /* nodemask for allocation fallback */
> nodemask_t alloc_nmask;
> +
> + /*
> + * bitmap used to collapse mTHP sizes.
> + */
> + DECLARE_BITMAP(mthp_bitmap, HPAGE_PMD_NR);
> + DECLARE_BITMAP(mthp_bitmap_mask, HPAGE_PMD_NR);
Nit: please remove the extra spaces.
> + struct scan_bit_state mthp_bitmap_stack[MAX_MTHP_BITMAP_STACK];
> };
>
> /**
> @@ -1357,6 +1369,85 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long pmd_address,
> return result;
> }
>
> +static void push_mthp_bitmap_stack(struct collapse_control *cc, int *top,
> + u8 order, u16 offset)
> +{
> + cc->mthp_bitmap_stack[++*top] = (struct scan_bit_state)
> + { order, offset };
> +}
> +
> +/*
> + * collapse_scan_bitmap() consumes the bitmap that is generated during
> + * collapse_scan_pmd() to determine what regions and mTHP orders fit best.
> + *
> + * Each bit in the bitmap represents a single occupied (!none/zero) page.
> + * A stack structure cc->mthp_bitmap_stack is used to check different regions
> + * of the bitmap for collapse eligibility. We start at the PMD order and
> + * check if it is eligible for collapse; if not, we add two entries to the
> + * stack at a lower order to represent the left and right halves of the region.
> + *
> + * For each region, we calculate the number of set bits and compare it
> + * against a threshold derived from collapse_max_ptes_none(). A region is
> + * eligible if the number of set bits exceeds this threshold.
> + */
> +static int collapse_scan_bitmap(struct mm_struct *mm, unsigned long address,
> + int referenced, int unmapped, struct collapse_control *cc,
> + bool *mmap_locked, unsigned long enabled_orders)
> +{
> + u8 order, next_order;
> + u16 offset, mid_offset;
> + int num_chunks;
> + int bits_set, threshold_bits;
> + int top = -1;
> + int collapsed = 0;
> + int ret;
> + struct scan_bit_state state;
> + unsigned int max_none_ptes;
Nit: could you rearrange the variable declarations into reverse Christmas
tree order (longest line first)?
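Something like this, for example (exact grouping up to you):

        int bits_set, threshold_bits;
        struct scan_bit_state state;
        unsigned int max_none_ptes;
        u16 offset, mid_offset;
        u8 order, next_order;
        int collapsed = 0;
        int num_chunks;
        int top = -1;
        int ret;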
> +
> + push_mthp_bitmap_stack(cc, &top, HPAGE_PMD_ORDER - KHUGEPAGED_MIN_MTHP_ORDER, 0);
> +
> + while (top >= 0) {
> + state = cc->mthp_bitmap_stack[top--];
> + order = state.order + KHUGEPAGED_MIN_MTHP_ORDER;
> + offset = state.offset;
> + num_chunks = 1UL << order;
Nit: 'num_chunks' should be 'unsigned long'.
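That is:

        unsigned long num_chunks;

since it is assigned '1UL << order' here.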
> +
> + /* Skip mTHP orders that are not enabled */
> + if (!test_bit(order, &enabled_orders))
> + goto next_order;
> +
> + max_none_ptes = collapse_max_ptes_none(order, !cc->is_khugepaged);
> +
> + /* Calculate weight of the range */
> + bitmap_zero(cc->mthp_bitmap_mask, HPAGE_PMD_NR);
> + bitmap_set(cc->mthp_bitmap_mask, offset, num_chunks);
> + bits_set = bitmap_weight_and(cc->mthp_bitmap,
> + cc->mthp_bitmap_mask, HPAGE_PMD_NR);
> +
> + threshold_bits = (1UL << order) - max_none_ptes - 1;
> +
> + /* Check if the region is eligible based on the threshold */
> + if (bits_set > threshold_bits) {
> + ret = collapse_huge_page(mm, address, referenced,
> + unmapped, cc, mmap_locked,
> + order, offset);
> + if (ret == SCAN_SUCCEED) {
> + collapsed += 1UL << order;
> + continue;
> + }
> + }
> +
> +next_order:
> + if (state.order > 0) {
> + next_order = state.order - 1;
> + mid_offset = offset + (num_chunks / 2);
> + push_mthp_bitmap_stack(cc, &top, next_order, mid_offset);
> + push_mthp_bitmap_stack(cc, &top, next_order, offset);
> + }
> + }
> + return collapsed;
> +}
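
For other readers, the traversal order may be worth spelling out (orders
below are the effective collapse orders, i.e. state.order +
KHUGEPAGED_MIN_MTHP_ORDER): because the right half is pushed before the
left half and the stack is LIFO, the walk is depth-first and left-first.
With 4K pages the initial entry covers the whole PMD (order 9, offset 0);
if that region is ineligible or the collapse fails, { order 8, offset 0 }
is examined before { order 8, offset 256 }, then { order 7, offset 0 }
before { order 7, offset 128 }, and so on down to order 2. A successful
collapse skips the split via the 'continue', so its sub-regions are never
revisited.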
> +
> static int collapse_scan_pmd(struct mm_struct *mm,
> struct vm_area_struct *vma,
> unsigned long start_addr, bool *mmap_locked,
> @@ -1364,11 +1455,15 @@ static int collapse_scan_pmd(struct mm_struct *mm,
> {
> pmd_t *pmd;
> pte_t *pte, *_pte;
> + int i;
> int result = SCAN_FAIL, referenced = 0;
> - int none_or_zero = 0, shared = 0;
> + int none_or_zero = 0, shared = 0, nr_collapsed = 0;
> struct page *page = NULL;
> + unsigned int max_ptes_none;
> struct folio *folio = NULL;
> unsigned long addr;
> + unsigned long enabled_orders;
> + bool full_scan = true;
> spinlock_t *ptl;
> int node = NUMA_NO_NODE, unmapped = 0;
>
> @@ -1378,16 +1473,29 @@ static int collapse_scan_pmd(struct mm_struct *mm,
> if (result != SCAN_SUCCEED)
> goto out;
>
> + bitmap_zero(cc->mthp_bitmap, HPAGE_PMD_NR);
> memset(cc->node_load, 0, sizeof(cc->node_load));
> nodes_clear(cc->alloc_nmask);
> +
> + enabled_orders = collapse_allowable_orders(vma, vma->vm_flags, cc->is_khugepaged);
> +
> + /*
> + * If PMD is the only enabled order, enforce max_ptes_none, otherwise
> + * scan all pages to populate the bitmap for mTHP collapse.
> + */
> + if (cc->is_khugepaged && enabled_orders == _BITUL(HPAGE_PMD_ORDER))
> + full_scan = false;
> + max_ptes_none = collapse_max_ptes_none(HPAGE_PMD_ORDER, full_scan);
> +
> pte = pte_offset_map_lock(mm, pmd, start_addr, &ptl);
> if (!pte) {
> result = SCAN_PMD_NULL;
> goto out;
> }
>
> - for (addr = start_addr, _pte = pte; _pte < pte + HPAGE_PMD_NR;
> - _pte++, addr += PAGE_SIZE) {
> + for (i = 0; i < HPAGE_PMD_NR; i++) {
> + _pte = pte + i;
> + addr = start_addr + i * PAGE_SIZE;
> pte_t pteval = ptep_get(_pte);
> if (is_swap_pte(pteval)) {
> ++unmapped;
> @@ -1412,8 +1520,7 @@ static int collapse_scan_pmd(struct mm_struct *mm,
> if (pte_none_or_zero(pteval)) {
> ++none_or_zero;
> if (!userfaultfd_armed(vma) &&
> - (!cc->is_khugepaged ||
> - none_or_zero <= khugepaged_max_ptes_none)) {
> + none_or_zero <= max_ptes_none) {
> continue;
> } else {
> result = SCAN_EXCEED_NONE_PTE;
> @@ -1461,6 +1568,8 @@ static int collapse_scan_pmd(struct mm_struct *mm,
> }
> }
>
> + /* Set bit for occupied pages */
> + bitmap_set(cc->mthp_bitmap, i, 1);
> /*
> * Record which node the original page is from and save this
> * information to cc->node_load[].
> @@ -1517,9 +1626,12 @@ static int collapse_scan_pmd(struct mm_struct *mm,
> out_unmap:
> pte_unmap_unlock(pte, ptl);
> if (result == SCAN_SUCCEED) {
> - result = collapse_huge_page(mm, start_addr, referenced,
> - unmapped, cc, mmap_locked,
> - HPAGE_PMD_ORDER, 0);
> + nr_collapsed = collapse_scan_bitmap(mm, start_addr, referenced, unmapped,
> + cc, mmap_locked, enabled_orders);
> + if (nr_collapsed > 0)
> + result = SCAN_SUCCEED;
> + else
> + result = SCAN_FAIL;
> }
> out:
> trace_mm_khugepaged_scan_pmd(mm, folio, referenced,