Message-Id: <20220317132450.5116-1-osalvador@suse.de>
Date: Thu, 17 Mar 2022 14:24:50 +0100
From: Oscar Salvador <osalvador@...e.de>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Dave Hansen <dave.hansen@...ux.intel.com>,
"Huang, Ying" <ying.huang@...el.com>,
Abhishek Goel <huntbag@...ux.vnet.ibm.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Oscar Salvador <osalvador@...e.de>
Subject: [PATCH] mm: Untangle config dependencies for demote-on-reclaim
At the time demote-on-reclaim was introduced, it was tied to
CONFIG_HOTPLUG_CPU + CONFIG_MIGRATION, but that is not really
accurate.

The only two things we actually need to depend on are CONFIG_NUMA +
CONFIG_MIGRATION, so clean this up.

Furthermore, we only register the hotplug memory notifier when the
kernel is built with CONFIG_MEMORY_HOTPLUG.
Signed-off-by: Oscar Salvador <osalvador@...e.de>
Suggested-by: "Huang, Ying" <ying.huang@...el.com>
---
include/linux/migrate.h |  5 ++++-
mm/migrate.c            | 11 ++++++-----
mm/vmstat.c             |  2 --
3 files changed, 10 insertions(+), 8 deletions(-)
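
For reference, here is a condensed sketch of the guard layering this patch
aims for, pieced together from the hunks below (not the literal post-patch
files; unrelated declarations and the node_demotion[] allocation are elided):

/* include/linux/migrate.h (condensed): the declarations are keyed off
 * CONFIG_NUMA instead of CONFIG_HOTPLUG_CPU, with no-op stubs for
 * !CONFIG_NUMA and !CONFIG_MIGRATION builds. */
#ifdef CONFIG_MIGRATION
#ifdef CONFIG_NUMA
extern void set_migration_target_nodes(void);
extern void migrate_on_reclaim_init(void);
#else
static inline void set_migration_target_nodes(void) {}
static inline void migrate_on_reclaim_init(void) {}
#endif
#else /* !CONFIG_MIGRATION */
static inline void set_migration_target_nodes(void) {}
static inline void migrate_on_reclaim_init(void) {}
#endif

/* mm/migrate.c (condensed): the memory hotplug notifier is only wired
 * up when the kernel is built with CONFIG_MEMORY_HOTPLUG. */
void __init migrate_on_reclaim_init(void)
{
	/* node_demotion[] allocation and WARN_ON() as before ... */
#ifdef CONFIG_MEMORY_HOTPLUG
	hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
#endif
	cpus_read_lock();
	set_migration_target_nodes();
	cpus_read_unlock();
}

With that, mm/vmstat.c can call migrate_on_reclaim_init() unconditionally,
since the !CONFIG_MIGRATION and !CONFIG_NUMA stubs compile away.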
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index a4a336fd81fc..1efabe7bb5fc 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -49,14 +49,17 @@ int folio_migrate_mapping(struct address_space *mapping,
extern bool numa_demotion_enabled;
extern void migrate_on_reclaim_init(void);
-#ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_NUMA
extern void set_migration_target_nodes(void);
+extern void migrate_on_reclaim_init(void);
#else
static inline void set_migration_target_nodes(void) {}
+static inline void migrate_on_reclaim_init(void) {}
#endif
#else
static inline void set_migration_target_nodes(void) {}
+static inline void migrate_on_reclaim_init(void) {}
static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
diff --git a/mm/migrate.c b/mm/migrate.c
index 3364bfaddeef..118f71425241 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2144,7 +2144,6 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
-#endif /* CONFIG_NUMA */
/*
* node_demotion[] example:
@@ -2278,7 +2277,6 @@ int next_demotion_node(int node)
return target;
}
-#if defined(CONFIG_HOTPLUG_CPU)
/* Disable reclaim-based migration. */
static void __disable_all_migrate_targets(void)
{
@@ -2471,6 +2469,7 @@ void set_migration_target_nodes(void)
* __set_migration_target_nodes() can be used as opposed to
* set_migration_target_nodes().
*/
+#ifdef CONFIG_MEMORY_HOTPLUG
static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
unsigned long action, void *_arg)
{
@@ -2516,6 +2515,7 @@ static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
return notifier_from_errno(0);
}
+#endif
void __init migrate_on_reclaim_init(void)
{
@@ -2523,8 +2523,9 @@ void __init migrate_on_reclaim_init(void)
sizeof(struct demotion_nodes),
GFP_KERNEL);
WARN_ON(!node_demotion);
-
+#ifdef CONFIG_MEMORY_HOTPLUG
hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
+#endif
/*
* At this point, all numa nodes with memory/CPus have their state
* properly set, so we can build the demotion order now.
@@ -2535,7 +2536,6 @@ void __init migrate_on_reclaim_init(void)
set_migration_target_nodes();
cpus_read_unlock();
}
-#endif /* CONFIG_HOTPLUG_CPU */
bool numa_demotion_enabled = false;
@@ -2596,4 +2596,5 @@ static int __init numa_init_sysfs(void)
return err;
}
subsys_initcall(numa_init_sysfs);
-#endif
+#endif /* CONFIG_SYSFS */
+#endif /* CONFIG_NUMA */
diff --git a/mm/vmstat.c b/mm/vmstat.c
index b75b1a64b54c..f2d0dec1062d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -2111,9 +2111,7 @@ void __init init_mm_internals(void)
start_shepherd_timer();
#endif
-#if defined(CONFIG_MIGRATION) && defined(CONFIG_HOTPLUG_CPU)
migrate_on_reclaim_init();
-#endif
#ifdef CONFIG_PROC_FS
proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
--
2.34.1