Message-ID: <aOa0UPnxJVGvqc8S@gourry-fedora-PF4VCD3F>
Date: Wed, 8 Oct 2025 14:58:24 -0400
From: Gregory Price <gourry@...rry.net>
To: David Hildenbrand <david@...hat.com>
Cc: linux-mm@...ck.org, corbet@....net, muchun.song@...ux.dev,
osalvador@...e.de, akpm@...ux-foundation.org, hannes@...xchg.org,
laoar.shao@...il.com, brauner@...nel.org, mclapinski@...gle.com,
joel.granados@...nel.org, linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org, Mel Gorman <mgorman@...e.de>,
Michal Hocko <mhocko@...e.com>,
Alexandru Moise <00moses.alexander00@...il.com>,
David Rientjes <rientjes@...gle.com>
Subject: Re: [PATCH] Revert "mm, hugetlb: remove hugepages_treat_as_movable
sysctl"
On Wed, Oct 08, 2025 at 04:44:22PM +0200, David Hildenbrand wrote:
> On 08.10.25 16:18, Gregory Price wrote:
> > On Wed, Oct 08, 2025 at 10:58:23AM +0200, David Hildenbrand wrote:
> > > On 07.10.25 23:44, Gregory Price wrote:
> > > I mean, this is as ugly as it gets.
> > >
> > > Can't we just let that old approach RIP where it belongs? :)
> > >
> >
> > Definitely - just found this previously existed and wanted to probe for
> > how offensive reintroducing it would be. Seems the answer is essentially
> > "let's do it a little differently".
> >
> > > Something I could sympathize with is treating gigantic pages that are
> > > actually migratable as movable.
> > >
> > ...
> > > - gfp |= hugepage_movable_supported(h) ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
> > > + gfp |= hugepage_migration_supported(h) ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
> > >
> > > Assume you want to offline part of ZONE_MOVABLE: there might still be
> > > sufficient space to allocate a 1 GiB area elsewhere and actually move
> > > the gigantic page.
> > >
> > > IIRC, we do the same for memory offlining already.
> > >
> >
> > This is generally true of other page sizes as well, though, isn't it?
> > If the system is truly so pressured that it can't successfully move a
> > 2MB page, offline may still fail. So allowing 1GB pages is only a risk
> > in the sense that it's harder to allocate new targets for them.
>
> Right, but memory defragmentation works on pageblock level, so 2 MiB is much
> MUCH more reliable :)
>
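Right - with 2MiB pageblocks (the x86 default), compaction only has to
free a single pageblock to produce a 2MB hugepage, but 512 physically
consecutive ones for a 1GB page ((1 << 30) / (2 << 20) = 512), so no
argument there.
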
fwiw, this works cleanly. Just dropping this here, but we should
continue the zone conversation. I need to check, but does this actually
allow pinnable allocations? I thought pinning kicked off migration.
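Something like the below is what I had in mind for checking that -
io_uring buffer registration takes a FOLL_LONGTERM pin, so it should
either migrate the gigantic page out of ZONE_MOVABLE first or fail
outright. Rough sketch only (assumes liburing is installed; the file
name and queue depth are arbitrary), builds with gcc pin_probe.c -luring:
================== pin_probe.c =======================
/*
 * pin_probe.c - probe whether a ZONE_MOVABLE 1GB hugetlb page can be
 * long-term pinned; io_uring buffer registration pins with FOLL_LONGTERM.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <liburing.h>
#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif
#ifndef MAP_HUGE_1GB
#define MAP_HUGE_1GB (30 << MAP_HUGE_SHIFT)
#endif
int main(void)
{
    size_t size = 1UL << 30;
    struct io_uring ring;
    struct iovec iov;
    int ret;
    void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0);
    if (addr == MAP_FAILED) {
        perror("mmap hugepage");
        return 1;
    }
    ((volatile char *)addr)[0] = 0; /* fault the hugepage in */
    ret = io_uring_queue_init(8, &ring, 0);
    if (ret < 0) {
        fprintf(stderr, "io_uring_queue_init: %d\n", ret);
        return 1;
    }
    /* registering the buffer takes the long-term pin */
    iov.iov_base = addr;
    iov.iov_len = size;
    ret = io_uring_register_buffers(&ring, &iov, 1);
    printf("register_buffers: %d (0 means the pin succeeded)\n", ret);
    io_uring_queue_exit(&ring);
    munmap(addr, size);
    return 0;
}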
================== test =======================
# echo 1 > /proc/sys/vm/movable_gigantic_pages
# echo 1 > /sys/devices/system/node/node0/hugepages/hugepages-1048576kB/nr_hugepages
# echo 1 > /sys/devices/system/node/node1/hugepages/hugepages-1048576kB/nr_hugepages
# ./huge
Allocating 1GB hugepage
Binding hugepage to NUMA node 1
Faulting page in
Resetting mbind policy to MPOL_DEFAULT (local policy)
Migrating
Migrated pages from node 1 to node 0, pages not moved: 0
================== patch =======================
commit 395988dc319771db980dab3f95ed9ec8f0b74945
Author: Gregory Price <gourry@...rry.net>
Date:   Tue Oct 7 10:11:51 2025 -0700

    mm, hugetlb: introduce movable_gigantic_pages

    Signed-off-by: Gregory Price <gourry@...rry.net>
diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
index 9bef46151d53..1535c9a964dc 100644
--- a/Documentation/admin-guide/sysctl/vm.rst
+++ b/Documentation/admin-guide/sysctl/vm.rst
@@ -54,6 +54,7 @@ Currently, these files are in /proc/sys/vm:
- mmap_min_addr
- mmap_rnd_bits
- mmap_rnd_compat_bits
+- movable_gigantic_pages
- nr_hugepages
- nr_hugepages_mempolicy
- nr_overcommit_hugepages
@@ -624,6 +625,22 @@ This value can be changed after boot using the
/proc/sys/vm/mmap_rnd_compat_bits tunable
+movable_gigantic_pages
+======================
+
+If set to non-zero, gigantic hugepages may be allocated from
+ZONE_MOVABLE. ZONE_MOVABLE memory may be created via the kernel boot
+parameter `kernelcore` or via memory hotplug as discussed in
+Documentation/admin-guide/mm/memory-hotplug.rst.
+
+Support for migrating gigantic pages, and therefore for this parameter,
+may depend on the architecture.
+
+Note that using ZONE_MOVABLE gigantic pages may make features like
+memory hotremove less reliable, as migrating gigantic pages is more
+difficult due to needing larger amounts of physically contiguous memory.
+
+
nr_hugepages
============
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 42f374e828a2..834061eb2ddd 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -172,6 +172,7 @@ bool hugetlbfs_pagecache_present(struct hstate *h,
struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
+extern int movable_gigantic_pages __read_mostly;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages[MAX_NUMNODES];
@@ -924,7 +925,7 @@ static inline bool hugepage_movable_supported(struct hstate *h)
if (!hugepage_migration_supported(h))
return false;
- if (hstate_is_gigantic(h))
+ if (hstate_is_gigantic(h) && !movable_gigantic_pages)
return false;
return true;
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a0d285d20992..3f8f3d6f2d60 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -54,6 +54,8 @@
#include "hugetlb_cma.h"
#include <linux/page-isolation.h>
+int movable_gigantic_pages __read_mostly;
+
int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
@@ -5199,6 +5201,13 @@ static const struct ctl_table hugetlb_table[] = {
.mode = 0644,
.proc_handler = hugetlb_overcommit_handler,
},
+ {
+ .procname = "movable_gigantic_pages",
+ .data = &movable_gigantic_pages,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
};
static void __init hugetlb_sysctl_init(void)
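One thing I should probably change before sending this properly:
proc_dointvec accepts any integer, so clamping the knob to a strict 0/1
with proc_dointvec_minmax would be cleaner. Untested sketch:
	{
		.procname	= "movable_gigantic_pages",
		.data		= &movable_gigantic_pages,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},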
================== huge.c =======================
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
#include <string.h>
#include <errno.h>
#include <sys/syscall.h>
#include <linux/mempolicy.h>
#include <stdint.h>
#include <time.h>
#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif
#ifndef MAP_HUGE_1GB
#define MAP_HUGE_1GB (30 << MAP_HUGE_SHIFT)
#endif
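/* raw syscall wrappers: glibc exposes mbind/migrate_pages only via libnuma */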
static long mbind_syscall(void *addr, unsigned long len, int mode,
const unsigned long *nodemask, unsigned long maxnode, unsigned flags) {
return syscall(__NR_mbind, addr, len, mode, nodemask, maxnode, flags);
}
static long migrate_pages_syscall(pid_t pid, unsigned long maxnode,
const unsigned long *from, const unsigned long *to) {
return syscall(__NR_migrate_pages, pid, maxnode, from, to);
}
int main() {
size_t size = 1UL << 30; // 1GB
int node_from = 1;
int node_to = 0;
printf("Allocating 1GB hugepage\n");
void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0);
if (addr == MAP_FAILED) {
perror("mmap hugepage");
return 1;
}
printf("Binding hugepage to NUMA node %d\n", node_from);
unsigned long nodemask = 1UL << node_from;
if (mbind_syscall(addr, size, MPOL_BIND, &nodemask, sizeof(nodemask) * 8, 0) != 0) {
perror("mbind");
munmap(addr, size);
return 1;
}
printf("Faulting page in\n");
((volatile char *)addr)[0] = 0;
printf("Resetting mbind policy to MPOL_DEFAULT (local policy)\n");
if (mbind_syscall(addr, size, MPOL_DEFAULT, NULL, 0, 0) != 0) {
perror("mbind failed to reset");
munmap(addr, size);
return 1;
}
printf("Migrating\n");
unsigned long from_mask = 1UL << node_from;
unsigned long to_mask = 1UL << node_to;
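/* migrate_pages() returns the number of pages that could not be moved */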
long ret = migrate_pages_syscall(0, sizeof(unsigned long) * 8, &from_mask, &to_mask);
if (ret < 0) {
perror("migrate_pages");
munmap(addr, size);
return 1;
}
printf("Migrated pages from node %d to node %d, pages not moved: %ld\n", node_from, node_to, ret);
munmap(addr, size);
return 0;
}
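(For reference: huge.c uses raw syscall numbers for mbind/migrate_pages,
so it needs no libnuma - gcc -o huge huge.c is enough - and was run as
root with the sysctl and nr_hugepages set as shown in the test above.)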