Message-Id: <20220704070612.299585-5-aneesh.kumar@linux.ibm.com>
Date: Mon, 4 Jul 2022 12:36:04 +0530
From: "Aneesh Kumar K.V" <aneesh.kumar@...ux.ibm.com>
To: linux-mm@...ck.org, akpm@...ux-foundation.org
Cc: Wei Xu <weixugc@...gle.com>, Huang Ying <ying.huang@...el.com>,
Yang Shi <shy828301@...il.com>,
Davidlohr Bueso <dave@...olabs.net>,
Tim C Chen <tim.c.chen@...el.com>,
Michal Hocko <mhocko@...nel.org>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
Hesham Almatary <hesham.almatary@...wei.com>,
Dave Hansen <dave.hansen@...el.com>,
Jonathan Cameron <Jonathan.Cameron@...wei.com>,
Alistair Popple <apopple@...dia.com>,
Dan Williams <dan.j.williams@...el.com>,
Johannes Weiner <hannes@...xchg.org>, jvgediya.oss@...il.com,
"Aneesh Kumar K.V" <aneesh.kumar@...ux.ibm.com>
Subject: [PATCH v8 04/12] mm/demotion: Add hotplug callbacks to handle new numa node onlined
If a newly onlined NUMA node doesn't have a memory tier assigned,
the kernel adds the NUMA node to the default memory tier.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@...ux.ibm.com>
---
mm/memory-tiers.c | 68 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 68 insertions(+)
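
Note for readers less familiar with the nodemask helpers that back
memtier->nodelist below (node_set() in __node_set_memory_tier() in
particular): here is a minimal, illustrative sketch of the relevant API
from include/linux/nodemask.h. The mask and function names are
hypothetical and not part of this patch.

#include <linux/nodemask.h>
#include <linux/printk.h>

/* Hypothetical per-tier node mask, for illustration only. */
static nodemask_t demo_tier_nodes = NODE_MASK_NONE;

static void demo_nodemask_usage(int node)
{
	int n;

	node_set(node, demo_tier_nodes);		/* add @node to the mask */
	if (node_isset(node, demo_tier_nodes))		/* membership test */
		pr_info("node %d is in the tier\n", node);

	for_each_node_mask(n, demo_tier_nodes)		/* walk all member nodes */
		pr_info("tier contains node %d\n", n);

	node_clear(node, demo_tier_nodes);		/* drop @node again */
	if (nodes_empty(demo_tier_nodes))
		pr_info("tier has no nodes left\n");
}
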
diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
index fc404fcff7ff..2147112981a6 100644
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -5,6 +5,7 @@
#include <linux/slab.h>
#include <linux/lockdep.h>
#include <linux/moduleparam.h>
+#include <linux/memory.h>
#include <linux/memory-tiers.h>
struct memory_tier {
@@ -130,8 +131,73 @@ int node_create_and_set_memory_tier(int node, int tier)
}
EXPORT_SYMBOL_GPL(node_create_and_set_memory_tier);
+static int __node_set_memory_tier(int node, int tier)
+{
+	int ret = 0;
+	struct memory_tier *memtier;
+
+	memtier = __get_memory_tier_from_id(tier);
+	if (!memtier) {
+		ret = -EINVAL;
+		goto out;
+	}
+	node_set(node, memtier->nodelist);
+out:
+	return ret;
+}
+
+static int node_set_memory_tier(int node, int tier)
+{
+	struct memory_tier *memtier;
+	int ret = 0;
+
+	mutex_lock(&memory_tier_lock);
+	memtier = __node_get_memory_tier(node);
+	if (!memtier)
+		ret = __node_set_memory_tier(node, tier);
+
+	mutex_unlock(&memory_tier_lock);
+
+	return ret;
+}
+
static unsigned int default_memtier = DEFAULT_MEMORY_TIER;
core_param(default_memory_tier, default_memtier, uint, 0644);
+/*
+ * This runs whether reclaim-based migration is enabled or not,
+ * which ensures that the user can turn reclaim-based migration on
+ * or off at any time without needing to recalculate migration targets.
+ */
+static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
+						 unsigned long action, void *_arg)
+{
+	struct memory_notify *arg = _arg;
+
+	/*
+	 * Only update the node migration order when a node is
+	 * changing status, like online->offline.
+	 */
+	if (arg->status_change_nid < 0)
+		return notifier_from_errno(0);
+
+	switch (action) {
+	case MEM_ONLINE:
+		/*
+		 * We ignore the error here; if the node already has a memory
+		 * tier registered, we will continue to use that for the new
+		 * memory we are adding here.
+		 */
+		node_set_memory_tier(arg->status_change_nid, default_memtier);
+		break;
+	}
+
+	return notifier_from_errno(0);
+}
+
+static void __init migrate_on_reclaim_init(void)
+{
+	hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
+}
static int __init memory_tier_init(void)
{
@@ -153,6 +219,8 @@ static int __init memory_tier_init(void)
	/* CPU only nodes are not part of memory tiers. */
	memtier->nodelist = node_states[N_MEMORY];
	mutex_unlock(&memory_tier_lock);
+
+	migrate_on_reclaim_init();
	return 0;
}
subsys_initcall(memory_tier_init);
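
Not part of the patch, but possibly useful when testing it: since
default_memory_tier is registered with core_param(), the default tier
used for newly onlined nodes should be overridable on the kernel
command line and, given the 0644 permission, readable and writable at
runtime via sysfs. A sketch, assuming the usual core_param() behaviour;
the tier value is a placeholder:

	default_memory_tier=<tier-id>		# kernel command line, no module prefix

	cat /sys/module/kernel/parameters/default_memory_tier
	echo <tier-id> > /sys/module/kernel/parameters/default_memory_tier

As far as the code above goes, changing the value at runtime only
affects nodes onlined afterwards; nodes already assigned to a tier keep
their tier.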
--
2.36.1