Message-ID: <63ba9fe3-e384-4396-bd86-fe62150ed1ab@redhat.com>
Date: Mon, 19 May 2025 10:21:41 +0200
From: David Hildenbrand <david@...hat.com>
To: Zi Yan <ziy@...dia.com>, Oscar Salvador <osalvador@...e.de>,
 Johannes Weiner <hannes@...xchg.org>, linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
 Vlastimil Babka <vbabka@...e.cz>, Baolin Wang
 <baolin.wang@...ux.alibaba.com>,
 "Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
 Mel Gorman <mgorman@...hsingularity.net>,
 Suren Baghdasaryan <surenb@...gle.com>, Michal Hocko <mhocko@...e.com>,
 Brendan Jackman <jackmanb@...gle.com>, Richard Chang
 <richardycc@...gle.com>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v4 2/4] mm/page_isolation: remove migratetype from
 move_freepages_block_isolate()

On 09.05.25 22:01, Zi Yan wrote:
> Since migratetype is no longer overwritten during pageblock isolation,
> moving pageblocks to and from MIGRATE_ISOLATE no longer needs migratetype.
> 
> Add MIGRATETYPE_NO_ISO_MASK to allow reading the before-isolation migratetype
> while a pageblock is isolated. It is used by move_freepages_block_isolate().
> 
> Add pageblock_isolate_and_move_free_pages() and
> pageblock_unisolate_and_move_free_pages() to be explicit about the page
> isolation operations. Both share the common code in
> __move_freepages_block_isolate(), which is renamed from
> move_freepages_block_isolate().
> 
> Make set_pageblock_migratetype() only accept non-MIGRATE_ISOLATE types, so
> that set_pageblock_isolate() must be used to isolate pageblocks.
> 
> Two consequential changes:
> 1. move pageblock migratetype code out of __move_freepages_block().
> 2. in online_pages() from mm/memory_hotplug.c, move_pfn_range_to_zone() is
>     called with MIGRATE_MOVABLE instead of MIGRATE_ISOLATE and all affected
>     pageblocks are isolated afterwards. Otherwise, all onlined pageblocks
>     would have an undetermined migratetype.
> 
> Signed-off-by: Zi Yan <ziy@...dia.com>
> ---
>   include/linux/mmzone.h         |  4 +-
>   include/linux/page-isolation.h |  5 ++-
>   mm/memory_hotplug.c            |  7 +++-
>   mm/page_alloc.c                | 73 +++++++++++++++++++++++++---------
>   mm/page_isolation.c            | 27 ++++++++-----
>   5 files changed, 82 insertions(+), 34 deletions(-)
> 
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index 7ef01fe148ce..f66895456974 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -107,8 +107,10 @@ static inline bool migratetype_is_mergeable(int mt)
>   extern int page_group_by_mobility_disabled;
>   
>   #ifdef CONFIG_MEMORY_ISOLATION
> -#define MIGRATETYPE_MASK ((BIT(PB_migratetype_bits) - 1) | PB_migrate_isolate_bit)
> +#define MIGRATETYPE_NO_ISO_MASK (BIT(PB_migratetype_bits) - 1)
> +#define MIGRATETYPE_MASK (MIGRATETYPE_NO_ISO_MASK | PB_migrate_isolate_bit)
>   #else
> +#define MIGRATETYPE_NO_ISO_MASK MIGRATETYPE_MASK
>   #define MIGRATETYPE_MASK (BIT(PB_migratetype_bits) - 1)
>   #endif
>   
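
As context, a minimal sketch of what the new mask enables; the helper name
below is illustrative, not something this patch adds:

static inline int get_pageblock_migratetype_no_iso(struct page *page)
{
	/*
	 * Mask out PB_migrate_isolate_bit so the stored, pre-isolation
	 * migratetype shows through even while the block is isolated.
	 */
	return get_pfnblock_flags_mask(page, page_to_pfn(page),
				       MIGRATETYPE_NO_ISO_MASK);
}
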
> diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
> index 898bb788243b..b0a2af0a5357 100644
> --- a/include/linux/page-isolation.h
> +++ b/include/linux/page-isolation.h
> @@ -26,9 +26,10 @@ static inline bool is_migrate_isolate(int migratetype)
>   #define REPORT_FAILURE	0x2
>   
>   void set_pageblock_migratetype(struct page *page, int migratetype);
> +void set_pageblock_isolate(struct page *page);
>   
> -bool move_freepages_block_isolate(struct zone *zone, struct page *page,
> -				  int migratetype);
> +bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page);
> +bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page);
>   
>   int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
>   			     int migratetype, int flags);
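
Caller-side, the direction is now carried by the function name rather than by
a migratetype argument; roughly (illustrative sketch, not the actual hunks):

	/* isolating: was move_freepages_block_isolate(zone, page, MIGRATE_ISOLATE) */
	if (!pageblock_isolate_and_move_free_pages(zone, page))
		return -EBUSY;

	/* undoing it: was move_freepages_block_isolate(zone, page, migratetype) */
	WARN_ON_ONCE(!pageblock_unisolate_and_move_free_pages(zone, page));
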
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index b1caedbade5b..c86c47bba019 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -1178,6 +1178,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages,
>   	const int nid = zone_to_nid(zone);
>   	int ret;
>   	struct memory_notify arg;
> +	unsigned long isol_pfn;
>   
>   	/*
>   	 * {on,off}lining is constrained to full memory sections (or more
> @@ -1192,7 +1193,11 @@ int online_pages(unsigned long pfn, unsigned long nr_pages,
>   
>   
>   	/* associate pfn range with the zone */
> -	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
> +	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_MOVABLE);
> +	for (isol_pfn = pfn;
> +	     isol_pfn < pfn + nr_pages;
> +	     isol_pfn += pageblock_nr_pages)
> +		set_pageblock_isolate(pfn_to_page(isol_pfn));

Can we move that all the way into memmap_init_range(), where we do the
set_pageblock_migratetype()?

The MIGRATE_UNMOVABLE in mhp_init_memmap_on_memory() is likely fine: all
pages in that pageblock will be used for the memmap. Everything is unmovable,
but there are no free pages, so ... nobody cares? :)


diff --git a/mm/internal.h b/mm/internal.h
index 6b8ed20177432..bc102846fcf1f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -821,7 +821,7 @@ extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
  			  int nid, bool exact_nid);
  
  void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
-		unsigned long, enum meminit_context, struct vmem_altmap *, int);
+		unsigned long, enum meminit_context, struct vmem_altmap *, bool);
  
  #if defined CONFIG_COMPACTION || defined CONFIG_CMA
  
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b1caedbade5b1..4b2cf20ad21fb 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -764,13 +764,13 @@ static inline void section_taint_zone_device(unsigned long pfn)
   * and resizing the pgdat/zone data to span the added pages. After this
   * call, all affected pages are PageOffline().
   *
- * All aligned pageblocks are initialized to the specified migratetype
- * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
- * zone stats (e.g., nr_isolate_pageblock) are touched.
+ * All aligned pageblocks are initialized to MIGRATE_MOVABLE, and are isolated
+ * if requested. Besides setting the migratetype, no related zone stats (e.g.,
+ * nr_isolate_pageblock) are touched.
   */
  void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
  				  unsigned long nr_pages,
-				  struct vmem_altmap *altmap, int migratetype)
+				  struct vmem_altmap *altmap, bool isolate)
  {
  	struct pglist_data *pgdat = zone->zone_pgdat;
  	int nid = pgdat->node_id;
@@ -802,7 +802,7 @@ void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
  	 * are reserved so nobody should be touching them so we should be safe
  	 */
  	memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0,
-			 MEMINIT_HOTPLUG, altmap, migratetype);
+			 MEMINIT_HOTPLUG, altmap, isolate);
  
  	set_zone_contiguous(zone);
  }
@@ -1127,7 +1127,7 @@ int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
  	if (mhp_off_inaccessible)
  		page_init_poison(pfn_to_page(pfn), sizeof(struct page) * nr_pages);
  
-	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE);
+	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, false);
  
  	for (i = 0; i < nr_pages; i++) {
  		struct page *page = pfn_to_page(pfn + i);
@@ -1192,7 +1192,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages,
  
  
  	/* associate pfn range with the zone */
-	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
+	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, true);
  
  	arg.start_pfn = pfn;
  	arg.nr_pages = nr_pages;
diff --git a/mm/memremap.c b/mm/memremap.c
index c417c843e9b1f..e47f6809f254b 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -254,7 +254,7 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
  		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
  		move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
  				PHYS_PFN(range_len(range)), params->altmap,
-				MIGRATE_MOVABLE);
+				false);
  	}
  
  	mem_hotplug_done();
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 1c5444e188f82..041106fc524be 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -867,14 +867,14 @@ static void __init init_unavailable_range(unsigned long spfn,
   * up by memblock_free_all() once the early boot process is
   * done. Non-atomic initialization, single-pass.
   *
- * All aligned pageblocks are initialized to the specified migratetype
- * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
- * zone stats (e.g., nr_isolate_pageblock) are touched.
+ * All aligned pageblocks are initialized to MIGRATE_MOVABLE, and are isolated
+ * if requested. Besides setting the migratetype, no related zone stats (e.g.,
+ * nr_isolate_pageblock) are touched.
   */
  void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
  		unsigned long start_pfn, unsigned long zone_end_pfn,
  		enum meminit_context context,
-		struct vmem_altmap *altmap, int migratetype)
+		struct vmem_altmap *altmap, bool isolate)
  {
  	unsigned long pfn, end_pfn = start_pfn + size;
  	struct page *page;
@@ -931,7 +931,9 @@ void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone
  		 * over the place during system boot.
  		 */
  		if (pageblock_aligned(pfn)) {
-			set_pageblock_migratetype(page, migratetype);
+			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+			if (isolate)
+				set_pageblock_isolate(page);
  			cond_resched();
  		}
  		pfn++;
@@ -954,7 +956,7 @@ static void __init memmap_init_zone_range(struct zone *zone,
  		return;
  
  	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
-			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
+			  zone_end_pfn, MEMINIT_EARLY, NULL, false);
  
  	if (*hole_pfn < start_pfn)
  		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
-- 
2.49.0



As an alternative, add a second "isolate" parameter and make sure that
migratetype is never MIGRATE_ISOLATE.
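
That is, roughly (assumed signature, untested sketch):

void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
			    unsigned long nr_pages, struct vmem_altmap *altmap,
			    int migratetype, bool isolate)
{
	/* Isolation is requested separately, never encoded in the type. */
	VM_WARN_ON_ONCE(is_migrate_isolate(migratetype));
	/* ... existing body, passing migratetype and isolate down ... */
}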

[...]

> --- a/mm/page_isolation.c
> +++ b/mm/page_isolation.c
> @@ -25,6 +25,12 @@ static inline void clear_pageblock_isolate(struct page *page)
>   	set_pfnblock_flags_mask(page, 0, page_to_pfn(page),
>   			PB_migrate_isolate_bit);
>   }
> +void set_pageblock_isolate(struct page *page)
> +{
> +	set_pfnblock_flags_mask(page, PB_migrate_isolate_bit,
> +			page_to_pfn(page),
> +			PB_migrate_isolate_bit);
> +}

Probably better placed in the previous patch, and in the header (see comment to #1).
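
I.e., as a static inline next to the existing helpers, mirroring
clear_pageblock_isolate() (sketch):

static inline void set_pageblock_isolate(struct page *page)
{
	set_pfnblock_flags_mask(page, PB_migrate_isolate_bit,
			page_to_pfn(page), PB_migrate_isolate_bit);
}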

-- 
Cheers,

David / dhildenb

