Message-ID: <20250508052409.GB320498@cmpxchg.org>
Date: Thu, 8 May 2025 01:24:09 -0400
From: Johannes Weiner <hannes@...xchg.org>
To: Zi Yan <ziy@...dia.com>
Cc: David Hildenbrand <david@...hat.com>, linux-mm@...ck.org,
Andrew Morton <akpm@...ux-foundation.org>,
Oscar Salvador <osalvador@...e.de>,
Vlastimil Babka <vbabka@...e.cz>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
Mel Gorman <mgorman@...hsingularity.net>,
Suren Baghdasaryan <surenb@...gle.com>,
Michal Hocko <mhocko@...e.com>,
Brendan Jackman <jackmanb@...gle.com>,
Richard Chang <richardycc@...gle.com>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3 1/4] mm/page_isolation: make page isolation a
standalone bit.
On Wed, May 07, 2025 at 05:10:56PM -0400, Zi Yan wrote:
> During page isolation, the original migratetype is overwritten, since
> MIGRATE_* are enums and stored in pageblock bitmaps. Change
> MIGRATE_ISOLATE to be stored as a standalone bit, PB_migrate_isolate,
> like PB_migrate_skip, so that the migratetype is not lost during
> pageblock isolation. Pageblock bits need to be word aligned, so expand
> the number of pageblock bits from 4 to 8 and make PB_migrate_isolate bit 7.
>
> Signed-off-by: Zi Yan <ziy@...dia.com>
> ---
> include/linux/mmzone.h | 17 ++++++++++----
> include/linux/page-isolation.h | 2 +-
> include/linux/pageblock-flags.h | 33 +++++++++++++++++++++++++-
> mm/page_alloc.c | 41 ++++++++++++++++++++++++++++++++-
> 4 files changed, 86 insertions(+), 7 deletions(-)
>
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index b19a98c20de8..9ec022a0b826 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -106,14 +106,23 @@ static inline bool migratetype_is_mergeable(int mt)
>
> extern int page_group_by_mobility_disabled;
>
> -#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
> +#ifdef CONFIG_MEMORY_ISOLATION
> +#define MIGRATETYPE_MASK ((BIT(PB_migratetype_bits) - 1) | PB_migrate_isolate_bit)
> +#else
> +#define MIGRATETYPE_MASK (BIT(PB_migratetype_bits) - 1)
> +#endif
>
> +#ifdef CONFIG_MEMORY_ISOLATION
> +unsigned long get_pageblock_migratetype(const struct page *page);
> +#else
> #define get_pageblock_migratetype(page) \
> get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
>
> -#define folio_migratetype(folio) \
> - get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \
> - MIGRATETYPE_MASK)
> +#endif
> +
> +#define folio_migratetype(folio) \
> + get_pageblock_migratetype(&folio->page)
> +
> struct free_area {
> struct list_head free_list[MIGRATE_TYPES];
> unsigned long nr_free;
> diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
> index 898bb788243b..51797dc39cbc 100644
> --- a/include/linux/page-isolation.h
> +++ b/include/linux/page-isolation.h
> @@ -5,7 +5,7 @@
> #ifdef CONFIG_MEMORY_ISOLATION
> static inline bool is_migrate_isolate_page(struct page *page)
> {
> - return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
> + return get_pageblock_isolate(page);
The old version still works, right?
It would match is_migrate_isolate() a bit better, but no strong
feelings either way...
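
I.e. just keeping what's there today, purely for symmetry with
is_migrate_isolate():

	static inline bool is_migrate_isolate_page(struct page *page)
	{
		return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
	}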
> static inline bool is_migrate_isolate(int migratetype)
> {
> diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
> index 0c4963339f0b..9fadae5892b2 100644
> --- a/include/linux/pageblock-flags.h
> +++ b/include/linux/pageblock-flags.h
> @@ -20,7 +20,10 @@ enum pageblock_bits {
> PB_migrate_end = PB_migrate + PB_migratetype_bits - 1,
> /* 3 bits required for migrate types */
> PB_migrate_skip,/* If set the block is skipped by compaction */
> -
> +#ifdef CONFIG_MEMORY_ISOLATION
> + PB_migrate_isolate = 7, /* If set the block is isolated */
> + /* set it to 7 to make pageblock bit word aligned */
> +#endif
> /*
> * Assume the bits will always align on a word. If this assumption
> * changes then get/set pageblock needs updating.
> @@ -28,6 +31,10 @@ enum pageblock_bits {
> NR_PAGEBLOCK_BITS
> };
>
> +#ifdef CONFIG_MEMORY_ISOLATION
> +#define PB_migrate_isolate_bit BIT(PB_migrate_isolate)
> +#endif
> +
> #if defined(CONFIG_PAGE_BLOCK_ORDER)
> #define PAGE_BLOCK_ORDER CONFIG_PAGE_BLOCK_ORDER
> #else
> @@ -105,4 +112,28 @@ static inline void set_pageblock_skip(struct page *page)
> }
> #endif /* CONFIG_COMPACTION */
>
> +#ifdef CONFIG_MEMORY_ISOLATION
> +#define get_pageblock_isolate(page) \
> + get_pfnblock_flags_mask(page, page_to_pfn(page), \
> + PB_migrate_isolate_bit)
> +#define clear_pageblock_isolate(page) \
> + set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \
> + PB_migrate_isolate_bit)
> +#define set_pageblock_isolate(page) \
> + set_pfnblock_flags_mask(page, PB_migrate_isolate_bit, \
> + page_to_pfn(page), \
> + PB_migrate_isolate_bit)
Would it make sense to move these to page_isolation.c? Then they
wouldn't have to be macros.
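
Completely untested sketch of what I mean, assuming the
get/set_pfnblock_flags_mask() helpers remain visible outside of
page_alloc.c; the !CONFIG_MEMORY_ISOLATION stubs below could stay in
the header as they are:

	/* mm/page_isolation.c */
	bool get_pageblock_isolate(struct page *page)
	{
		return get_pfnblock_flags_mask(page, page_to_pfn(page),
					       PB_migrate_isolate_bit);
	}

	void clear_pageblock_isolate(struct page *page)
	{
		set_pfnblock_flags_mask(page, 0, page_to_pfn(page),
					PB_migrate_isolate_bit);
	}

	void set_pageblock_isolate(struct page *page)
	{
		set_pfnblock_flags_mask(page, PB_migrate_isolate_bit,
					page_to_pfn(page),
					PB_migrate_isolate_bit);
	}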
> +#else
> +static inline bool get_pageblock_isolate(struct page *page)
> +{
> + return false;
> +}
> +static inline void clear_pageblock_isolate(struct page *page)
> +{
> +}
> +static inline void set_pageblock_isolate(struct page *page)
> +{
> +}
> +#endif /* CONFIG_MEMORY_ISOLATION */
> +
> #endif /* PAGEBLOCK_FLAGS_H */
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index c77592b22256..acf68ef041d8 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -381,12 +381,40 @@ unsigned long get_pfnblock_flags_mask(const struct page *page,
> return (word >> bitidx) & mask;
> }
>
> +#ifdef CONFIG_MEMORY_ISOLATION
> +unsigned long get_pageblock_migratetype(const struct page *page)
> +{
> + unsigned long flags;
> +
> + flags = get_pfnblock_flags_mask(page, page_to_pfn(page),
> + MIGRATETYPE_MASK);
> + if (flags & PB_migrate_isolate_bit)
> + return MIGRATE_ISOLATE;
> +
> + return flags;
> +}
Since MIGRATETYPE_MASK includes the isolate bit if it exists, I think
you can share the get_pfnblock_flags_mask() part:
static inline unsigned long get_pageblock_migratetype(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;

	flags = get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
#ifdef CONFIG_MEMORY_ISOLATION
	if (flags & PB_migrate_isolate_bit)
		return MIGRATE_ISOLATE;
#endif
	return flags;
}
> +static __always_inline int get_pfnblock_migratetype(const struct page *page,
> + unsigned long pfn)
> +{
> + unsigned long flags;
> +
> + flags = get_pfnblock_flags_mask(page, pfn,
> + MIGRATETYPE_MASK);
> + if (flags & PB_migrate_isolate_bit)
> + return MIGRATE_ISOLATE;
> +
> + return flags;
> +}
> +#else
> static __always_inline int get_pfnblock_migratetype(const struct page *page,
> unsigned long pfn)
> {
> return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
> }
Same with this one.
>
> +#endif
> +
> /**
> * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
> * @page: The page within the block of interest
> @@ -402,8 +430,14 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
> unsigned long bitidx, word_bitidx;
> unsigned long word;
>
> +#ifdef CONFIG_MEMORY_ISOLATION
> + BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 8);
> + /* extra one for MIGRATE_ISOLATE */
> + BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits) + 1);
> +#else
> BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
> BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
> +#endif
>
> bitmap = get_pageblock_bitmap(page, pfn);
> bitidx = pfn_to_bitidx(page, pfn);
> @@ -426,7 +460,12 @@ void set_pageblock_migratetype(struct page *page, int migratetype)
> migratetype < MIGRATE_PCPTYPES))
> migratetype = MIGRATE_UNMOVABLE;
>
> - set_pfnblock_flags_mask(page, (unsigned long)migratetype,
> +#ifdef CONFIG_MEMORY_ISOLATION
> + if (migratetype == MIGRATE_ISOLATE)
> + set_pageblock_isolate(page);
Are there paths actually doing this after the second patch?
There are many places that want to *read* the migratetype or check for
MIGRATE_ISOLATE, but only isolation code should be manipulating that
bit, through the dedicated set/toggle_pageblock_isolate API.
If there isn't one, it might be good to enforce this with a VM_WARN
instead.
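
Untested, but something along these lines in
set_pageblock_migratetype(), instead of the branch; the warning text is
just a placeholder:

	#ifdef CONFIG_MEMORY_ISOLATION
		/* Isolation is driven through set/clear_pageblock_isolate() */
		VM_WARN_ONCE(migratetype == MIGRATE_ISOLATE,
			     "use set_pageblock_isolate() instead");
	#endif
		set_pfnblock_flags_mask(page, (unsigned long)migratetype,
					page_to_pfn(page), MIGRATETYPE_MASK);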
> + else
> +#endif
> + set_pfnblock_flags_mask(page, (unsigned long)migratetype,
> page_to_pfn(page), MIGRATETYPE_MASK);
If the branch stays, you could add a `return' to the MIGRATE_ISOLATE
leg, drop the else and indent this line normally.
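
IOW, roughly (again untested):

	#ifdef CONFIG_MEMORY_ISOLATION
		if (migratetype == MIGRATE_ISOLATE) {
			set_pageblock_isolate(page);
			return;
		}
	#endif

		set_pfnblock_flags_mask(page, (unsigned long)migratetype,
					page_to_pfn(page), MIGRATETYPE_MASK);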