lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230511065607.37407-4-ying.huang@intel.com>
Date:   Thu, 11 May 2023 14:56:04 +0800
From:   Huang Ying <ying.huang@...el.com>
To:     linux-mm@...ck.org
Cc:     linux-kernel@...r.kernel.org,
        Arjan Van De Ven <arjan@...ux.intel.com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Huang Ying <ying.huang@...el.com>,
        Mel Gorman <mgorman@...hsingularity.net>,
        Vlastimil Babka <vbabka@...e.cz>,
        David Hildenbrand <david@...hat.com>,
        Johannes Weiner <jweiner@...hat.com>,
        Dave Hansen <dave.hansen@...ux.intel.com>,
        Michal Hocko <mhocko@...e.com>,
        Pavel Tatashin <pasha.tatashin@...een.com>,
        Matthew Wilcox <willy@...radead.org>
Subject: [RFC 3/6] mm: support multiple zone instances per zone type in memory online

Because there will be multiple movable zone instances, when a memory
section is onlined as movable, a target movable zone instance needs to
be chosen, similarly to how one is chosen for kernel zones.

The online target zone instance is chosen based on the zone instance
range for both movable and kernel zones.

Signed-off-by: "Huang, Ying" <ying.huang@...el.com>
Cc: Mel Gorman <mgorman@...hsingularity.net>
Cc: Vlastimil Babka <vbabka@...e.cz>
Cc: David Hildenbrand <david@...hat.com>
Cc: Johannes Weiner <jweiner@...hat.com>
Cc: Dave Hansen <dave.hansen@...ux.intel.com>
Cc: Michal Hocko <mhocko@...e.com>
Cc: Pavel Tatashin <pasha.tatashin@...een.com>
Cc: Matthew Wilcox <willy@...radead.org>
---
 mm/memory_hotplug.c | 38 ++++++++++++++++++++++++++++++--------
 1 file changed, 30 insertions(+), 8 deletions(-)

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 24df4acbeeae..4e7cad6d48dd 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -842,11 +842,37 @@ static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn
 
 		if (zone_intersects(zone, start_pfn, nr_pages))
 			return zone;
+		if (start_pfn < zone->zone_start_pfn)
+			return zone;
 	}
 
 	return &pgdat->node_zones[last_zone_idx(pgdat, ZONE_NORMAL)];
 }
 
+/*
+ * Returns a default movable memory zone for the given pfn range.
+ */
+static struct zone *default_movable_zone_for_pfn(int nid, unsigned long start_pfn,
+		unsigned long nr_pages)
+{
+	struct pglist_data *pgdat = NODE_DATA(nid);
+	struct zone *zone;
+	int zid;
+
+	for (zid = start_zone_idx(pgdat, ZONE_MOVABLE);
+	     zid < last_zone_idx(pgdat, ZONE_MOVABLE);
+	     zid++) {
+		zone = &pgdat->node_zones[zid];
+
+		if (zone_intersects(zone, start_pfn, nr_pages))
+			return zone;
+		if (start_pfn < zone->zone_start_pfn)
+			return zone;
+	}
+
+	return &pgdat->node_zones[last_zone_idx(pgdat, ZONE_MOVABLE)];
+}
+
 /*
  * Determine to which zone to online memory dynamically based on user
  * configuration and system stats. We care about the following ratio:
@@ -904,7 +930,6 @@ static struct zone *auto_movable_zone_for_pfn(int nid,
 {
 	unsigned long online_pages = 0, max_pages, end_pfn;
 	struct page *page;
-	pg_data_t *pgdat;
 
 	if (!auto_movable_ratio)
 		goto kernel_zone;
@@ -953,9 +978,8 @@ static struct zone *auto_movable_zone_for_pfn(int nid,
 	    !auto_movable_can_online_movable(nid, group, nr_pages))
 		goto kernel_zone;
 #endif /* CONFIG_NUMA */
-	pgdat = NODE_DATA(nid);
 
-	return &pgdat->node_zones[last_zone_idx(pgdat, ZONE_MOVABLE)];
+	return default_movable_zone_for_pfn(nid, pfn, nr_pages);
 kernel_zone:
 	return default_kernel_zone_for_pfn(nid, pfn, nr_pages);
 }
@@ -965,8 +989,8 @@ static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn
 {
 	struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
 			nr_pages);
-	pg_data_t *pgdat = NODE_DATA(nid);
-	struct zone *movable_zone = &pgdat->node_zones[last_zone_idx(pgdat, ZONE_MOVABLE)];
+	struct zone *movable_zone = default_movable_zone_for_pfn(nid, start_pfn,
+			nr_pages);
 	bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
 	bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);
 
@@ -989,13 +1013,11 @@ struct zone *zone_for_pfn_range(int online_type, int nid,
 		struct memory_group *group, unsigned long start_pfn,
 		unsigned long nr_pages)
 {
-	pg_data_t *pgdat = NODE_DATA(nid);
-
 	if (online_type == MMOP_ONLINE_KERNEL)
 		return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);
 
 	if (online_type == MMOP_ONLINE_MOVABLE)
-		return &pgdat->node_zones[last_zone_idx(pgdat, ZONE_MOVABLE)];
+		return default_movable_zone_for_pfn(nid, start_pfn, nr_pages);
 
 	if (online_policy == ONLINE_POLICY_AUTO_MOVABLE)
 		return auto_movable_zone_for_pfn(nid, group, start_pfn, nr_pages);
-- 
2.39.2

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ