Message-ID: <YgoykFBUnEJ6Ynro@kernel.org>
Date: Mon, 14 Feb 2022 12:44:32 +0200
From: Mike Rapoport <rppt@...nel.org>
To: Zi Yan <ziy@...dia.com>
Cc: David Hildenbrand <david@...hat.com>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
Michael Ellerman <mpe@...erman.id.au>,
Christoph Hellwig <hch@....de>,
Marek Szyprowski <m.szyprowski@...sung.com>,
Robin Murphy <robin.murphy@....com>,
linuxppc-dev@...ts.ozlabs.org,
virtualization@...ts.linux-foundation.org,
iommu@...ts.linux-foundation.org, Vlastimil Babka <vbabka@...e.cz>,
Mel Gorman <mgorman@...hsingularity.net>,
Eric Ren <renzhengeek@...il.com>,
Oscar Salvador <osalvador@...e.de>
Subject: Re: [PATCH v5 1/6] mm: page_isolation: move has_unmovable_pages() to
mm/page_isolation.c
On Fri, Feb 11, 2022 at 11:41:30AM -0500, Zi Yan wrote:
> From: Zi Yan <ziy@...dia.com>
>
> has_unmovable_pages() is only used in mm/page_isolation.c. Move it from
> mm/page_alloc.c and make it static.
>
> Signed-off-by: Zi Yan <ziy@...dia.com>
> Reviewed-by: Oscar Salvador <osalvador@...e.de>
Reviewed-by: Mike Rapoport <rppt@...ux.ibm.com>
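
A side note for anyone following along: the "move and make static" pattern
this patch applies is a common cleanup. A minimal, compilable sketch of the
idea (hypothetical names, not the kernel code itself): once a helper's only
caller lives in a single file, the declaration can be dropped from the
shared header and the definition given internal linkage, letting the
compiler warn about unused code and optimize more aggressively.

	#include <stdio.h>

	/* Formerly declared in a shared header; now file-local. */
	static int block_has_unmovable(const int *block, int n)
	{
		for (int i = 0; i < n; i++)
			if (block[i] < 0)	/* negative marks "unmovable" */
				return 1;
		return 0;
	}

	int main(void)
	{
		int block[] = { 1, 2, -3, 4 };

		printf("%d\n", block_has_unmovable(block, 4));	/* prints 1 */
		return 0;
	}
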
> ---
> include/linux/page-isolation.h | 2 -
> mm/page_alloc.c | 119 ---------------------------------
> mm/page_isolation.c | 119 +++++++++++++++++++++++++++++++++
> 3 files changed, 119 insertions(+), 121 deletions(-)
>
> diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
> index 572458016331..e14eddf6741a 100644
> --- a/include/linux/page-isolation.h
> +++ b/include/linux/page-isolation.h
> @@ -33,8 +33,6 @@ static inline bool is_migrate_isolate(int migratetype)
> #define MEMORY_OFFLINE 0x1
> #define REPORT_FAILURE 0x2
>
> -struct page *has_unmovable_pages(struct zone *zone, struct page *page,
> - int migratetype, int flags);
> void set_pageblock_migratetype(struct page *page, int migratetype);
> int move_freepages_block(struct zone *zone, struct page *page,
> int migratetype, int *num_movable);
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index cface1d38093..e2c6a67fc386 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -8962,125 +8962,6 @@ void *__init alloc_large_system_hash(const char *tablename,
> return table;
> }
>
> -/*
> - * This function checks whether the pageblock includes unmovable pages.
> - *
> - * A PageLRU check without isolation or the lru_lock can race, so a
> - * MIGRATE_MOVABLE pageblock might include unmovable pages. Likewise, a
> - * __PageMovable check without lock_page may miss some movable non-LRU
> - * pages when racing. So this function cannot be expected to be exact.
> - *
> - * Returns a page without holding a reference. If the caller wants to
> - * dereference that page (e.g., dumping), it has to make sure that it
> - * cannot get removed (e.g., via memory unplug) concurrently.
> - *
> - */
> -struct page *has_unmovable_pages(struct zone *zone, struct page *page,
> - int migratetype, int flags)
> -{
> - unsigned long iter = 0;
> - unsigned long pfn = page_to_pfn(page);
> - unsigned long offset = pfn % pageblock_nr_pages;
> -
> - if (is_migrate_cma_page(page)) {
> - /*
> - * CMA allocations (alloc_contig_range) really need to mark CMA
> - * pageblocks as isolated even when they are not movable in fact,
> - * so consider them movable here.
> - */
> - if (is_migrate_cma(migratetype))
> - return NULL;
> -
> - return page;
> - }
> -
> - for (; iter < pageblock_nr_pages - offset; iter++) {
> - page = pfn_to_page(pfn + iter);
> -
> - /*
> - * Both, bootmem allocations and memory holes are marked
> - * PG_reserved and are unmovable. We can even have unmovable
> - * allocations inside ZONE_MOVABLE, for example when
> - * specifying "movablecore".
> - */
> - if (PageReserved(page))
> - return page;
> -
> - /*
> - * If the zone is movable and we have ruled out all reserved
> - * pages then it should be reasonably safe to assume the rest
> - * is movable.
> - */
> - if (zone_idx(zone) == ZONE_MOVABLE)
> - continue;
> -
> - /*
> - * Hugepages are not in LRU lists, but they're movable.
> - * THPs are on the LRU, but are counted as their number of base pages.
> - * We need not scan over tail pages because we don't
> - * handle each tail page individually in migration.
> - */
> - if (PageHuge(page) || PageTransCompound(page)) {
> - struct page *head = compound_head(page);
> - unsigned int skip_pages;
> -
> - if (PageHuge(page)) {
> - if (!hugepage_migration_supported(page_hstate(head)))
> - return page;
> - } else if (!PageLRU(head) && !__PageMovable(head)) {
> - return page;
> - }
> -
> - skip_pages = compound_nr(head) - (page - head);
> - iter += skip_pages - 1;
> - continue;
> - }
> -
> - /*
> - * We can't use page_count without pinning the page
> - * because another CPU can free the compound page.
> - * This check already skips compound tails of THP
> - * because their page->_refcount is zero at all times.
> - */
> - if (!page_ref_count(page)) {
> - if (PageBuddy(page))
> - iter += (1 << buddy_order(page)) - 1;
> - continue;
> - }
> -
> - /*
> - * A HWPoisoned page may not be in the buddy system, and
> - * its page_count() is not 0.
> - */
> - if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
> - continue;
> -
> - /*
> - * We treat all PageOffline() pages as movable when offlining
> - * to give drivers a chance to decrement their reference count
> - * in MEM_GOING_OFFLINE in order to indicate that these pages
> - * can be offlined as there are no direct references anymore.
> - * For actually unmovable PageOffline() where the driver does
> - * not support this, we will fail later when trying to actually
> - * move these pages that still have a reference count > 0.
> - * (false negatives in this function only)
> - */
> - if ((flags & MEMORY_OFFLINE) && PageOffline(page))
> - continue;
> -
> - if (__PageMovable(page) || PageLRU(page))
> - continue;
> -
> - /*
> - * If there are RECLAIMABLE pages, we need to check
> - * them. But for now, memory offline itself doesn't call
> - * shrink_node_slabs(), and this still needs to be fixed.
> - */
> - return page;
> - }
> - return NULL;
> -}
> -
> #ifdef CONFIG_CONTIG_ALLOC
> static unsigned long pfn_max_align_down(unsigned long pfn)
> {
> diff --git a/mm/page_isolation.c b/mm/page_isolation.c
> index f67c4c70f17f..b34f1310aeaa 100644
> --- a/mm/page_isolation.c
> +++ b/mm/page_isolation.c
> @@ -15,6 +15,125 @@
> #define CREATE_TRACE_POINTS
> #include <trace/events/page_isolation.h>
>
> +/*
> + * This function checks whether the pageblock includes unmovable pages.
> + *
> + * A PageLRU check without isolation or the lru_lock can race, so a
> + * MIGRATE_MOVABLE pageblock might include unmovable pages. Likewise, a
> + * __PageMovable check without lock_page may miss some movable non-LRU
> + * pages when racing. So this function cannot be expected to be exact.
> + *
> + * Returns a page without holding a reference. If the caller wants to
> + * dereference that page (e.g., dumping), it has to make sure that it
> + * cannot get removed (e.g., via memory unplug) concurrently.
> + *
> + */
> +static struct page *has_unmovable_pages(struct zone *zone, struct page *page,
> + int migratetype, int flags)
> +{
> + unsigned long iter = 0;
> + unsigned long pfn = page_to_pfn(page);
> + unsigned long offset = pfn % pageblock_nr_pages;
> +
> + if (is_migrate_cma_page(page)) {
> + /*
> + * CMA allocations (alloc_contig_range) really need to mark CMA
> + * pageblocks as isolated even when they are not movable in fact,
> + * so consider them movable here.
> + */
> + if (is_migrate_cma(migratetype))
> + return NULL;
> +
> + return page;
> + }
> +
> + for (; iter < pageblock_nr_pages - offset; iter++) {
> + page = pfn_to_page(pfn + iter);
> +
> + /*
> + * Both, bootmem allocations and memory holes are marked
> + * PG_reserved and are unmovable. We can even have unmovable
> + * allocations inside ZONE_MOVABLE, for example when
> + * specifying "movablecore".
> + */
> + if (PageReserved(page))
> + return page;
> +
> + /*
> + * If the zone is movable and we have ruled out all reserved
> + * pages then it should be reasonably safe to assume the rest
> + * is movable.
> + */
> + if (zone_idx(zone) == ZONE_MOVABLE)
> + continue;
> +
> + /*
> + * Hugepages are not in LRU lists, but they're movable.
> + * THPs are on the LRU, but are counted as their number of base pages.
> + * We need not scan over tail pages because we don't
> + * handle each tail page individually in migration.
> + */
> + if (PageHuge(page) || PageTransCompound(page)) {
> + struct page *head = compound_head(page);
> + unsigned int skip_pages;
> +
> + if (PageHuge(page)) {
> + if (!hugepage_migration_supported(page_hstate(head)))
> + return page;
> + } else if (!PageLRU(head) && !__PageMovable(head)) {
> + return page;
> + }
> +
> + skip_pages = compound_nr(head) - (page - head);
> + iter += skip_pages - 1;
> + continue;
> + }
> +
> + /*
> + * We can't use page_count without pinning the page
> + * because another CPU can free the compound page.
> + * This check already skips compound tails of THP
> + * because their page->_refcount is zero at all times.
> + */
> + if (!page_ref_count(page)) {
> + if (PageBuddy(page))
> + iter += (1 << buddy_order(page)) - 1;
> + continue;
> + }
> +
> + /*
> + * A HWPoisoned page may not be in the buddy system, and
> + * its page_count() is not 0.
> + */
> + if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
> + continue;
> +
> + /*
> + * We treat all PageOffline() pages as movable when offlining
> + * to give drivers a chance to decrement their reference count
> + * in MEM_GOING_OFFLINE in order to indicate that these pages
> + * can be offlined as there are no direct references anymore.
> + * For actually unmovable PageOffline() where the driver does
> + * not support this, we will fail later when trying to actually
> + * move these pages that still have a reference count > 0.
> + * (false negatives in this function only)
> + */
> + if ((flags & MEMORY_OFFLINE) && PageOffline(page))
> + continue;
> +
> + if (__PageMovable(page) || PageLRU(page))
> + continue;
> +
> + /*
> + * If there are RECLAIMABLE pages, we need to check
> + * them. But for now, memory offline itself doesn't call
> + * shrink_node_slabs(), and this still needs to be fixed.
> + */
> + return page;
> + }
> + return NULL;
> +}
> +
> static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
> {
> struct zone *zone = page_zone(page);
> --
> 2.34.1
>
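For context on why internal linkage suffices: the only caller is
set_migratetype_isolate() in this same file, whose opening lines appear in
the hunk context above. Condensed from memory rather than quoted verbatim
(locking kept, bookkeeping and error reporting trimmed), the call site
looks roughly like this:

	static int set_migratetype_isolate(struct page *page, int migratetype,
					   int isol_flags)
	{
		struct zone *zone = page_zone(page);
		struct page *unmovable;
		unsigned long flags;

		spin_lock_irqsave(&zone->lock, flags);
		/* NULL means the whole pageblock can be isolated */
		unmovable = has_unmovable_pages(zone, page, migratetype,
						isol_flags);
		if (!unmovable)
			set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		spin_unlock_irqrestore(&zone->lock, flags);

		return unmovable ? -EBUSY : 0;
	}
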
--
Sincerely yours,
Mike.