[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251205233217.3344186-2-joshua.hahnjy@gmail.com>
Date: Fri, 5 Dec 2025 15:32:12 -0800
From: Joshua Hahn <joshua.hahnjy@...il.com>
To:
Cc: "Liam R. Howlett" <Liam.Howlett@...cle.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Barry Song <baohua@...nel.org>,
David Hildenbrand <david@...nel.org>,
Dev Jain <dev.jain@....com>,
Lance Yang <lance.yang@...ux.dev>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Masami Hiramatsu <mhiramat@...nel.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Nico Pache <npache@...hat.com>,
Ryan Roberts <ryan.roberts@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Zi Yan <ziy@...dia.com>,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
linux-trace-kernel@...r.kernel.org
Subject: [RFC LPC2025 PATCH 1/4] mm/khugepaged: Remove hpage_collapse_scan_abort
Commit 14a4e2141e24 ("mm, thp: only collapse hugepages to nodes with
affinity for zone_reclaim_mode") introduced khugepaged_scan_abort,
which was later renamed to hpage_collapse_scan_abort. It prevents
collapsing hugepages to remote nodes when zone_reclaim_mode is enabled,
so as to prefer reclaiming and allocating locally instead of allocating
on a faraway remote node (distance > RECLAIM_DISTANCE).
With the zone_reclaim_mode sysctl being deprecated later in the series,
remove hpage_collapse_scan_abort, its call sites, and its associated
value in the scan_result enum.
Signed-off-by: Joshua Hahn <joshua.hahnjy@...il.com>
---
include/trace/events/huge_memory.h | 1 -
mm/khugepaged.c | 34 ------------------------------
2 files changed, 35 deletions(-)
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 4cde53b45a85..1c0b146d1286 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -20,7 +20,6 @@
EM( SCAN_PTE_MAPPED_HUGEPAGE, "pte_mapped_hugepage") \
EM( SCAN_LACK_REFERENCED_PAGE, "lack_referenced_page") \
EM( SCAN_PAGE_NULL, "page_null") \
- EM( SCAN_SCAN_ABORT, "scan_aborted") \
EM( SCAN_PAGE_COUNT, "not_suitable_page_count") \
EM( SCAN_PAGE_LRU, "page_not_in_lru") \
EM( SCAN_PAGE_LOCK, "page_locked") \
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 97d1b2824386..a93228a53ee4 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -40,7 +40,6 @@ enum scan_result {
SCAN_PTE_MAPPED_HUGEPAGE,
SCAN_LACK_REFERENCED_PAGE,
SCAN_PAGE_NULL,
- SCAN_SCAN_ABORT,
SCAN_PAGE_COUNT,
SCAN_PAGE_LRU,
SCAN_PAGE_LOCK,
@@ -830,30 +829,6 @@ struct collapse_control khugepaged_collapse_control = {
.is_khugepaged = true,
};
-static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
-{
- int i;
-
- /*
- * If node_reclaim_mode is disabled, then no extra effort is made to
- * allocate memory locally.
- */
- if (!node_reclaim_enabled())
- return false;
-
- /* If there is a count for this node already, it must be acceptable */
- if (cc->node_load[nid])
- return false;
-
- for (i = 0; i < MAX_NUMNODES; i++) {
- if (!cc->node_load[i])
- continue;
- if (node_distance(nid, i) > node_reclaim_distance)
- return true;
- }
- return false;
-}
-
#define khugepaged_defrag() \
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
@@ -1355,10 +1330,6 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
* hit record.
*/
node = folio_nid(folio);
- if (hpage_collapse_scan_abort(node, cc)) {
- result = SCAN_SCAN_ABORT;
- goto out_unmap;
- }
cc->node_load[node]++;
if (!folio_test_lru(folio)) {
result = SCAN_PAGE_LRU;
@@ -2342,11 +2313,6 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
}
node = folio_nid(folio);
- if (hpage_collapse_scan_abort(node, cc)) {
- result = SCAN_SCAN_ABORT;
- folio_put(folio);
- break;
- }
cc->node_load[node]++;
if (!folio_test_lru(folio)) {
--
2.47.3
Powered by blists - more mailing lists