Message-ID: <e3276bb1-6d70-41c8-9d04-c7eade3dae95@suse.cz>
Date: Thu, 29 Aug 2024 17:12:54 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: Huan Yang <link@...o.com>, Andrew Morton <akpm@...ux-foundation.org>,
linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc: opensource.kernel@...o.com
Subject: Re: [PATCH] mm: page_alloc: simplify page del and expand
On 8/26/24 08:40, Huan Yang wrote:
> When a page is deleted from the buddy list and then expanded, the
> free_pages counter for the zone's migratetype is updated twice.
>
> The current way is to subtract the page count of the current order
> when deleting, and then add the split buddies back when expanding.
>
> This is unnecessary: as long as the migratetype stays the same, we
> can record the difference between the high-order page and what the
> expand added back, and subtract that net amount in one go.
>
> This patch merges the two updates, so free_pages is accounted only
> once, after both the deletion and the expansion are done.
>
> Signed-off-by: Huan Yang <link@...o.com>
Reviewed-by: Vlastimil Babka <vbabka@...e.cz>
Can't hurt to reduce the number of calls to __mod_zone_page_state() (via
account_freepages()) in the allocator.
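
As an aside, here is a minimal user-space sketch of the difference
(mock_account(), alloc_old() and alloc_new() are made-up stand-ins, not
the kernel helpers): the old path hits the counter twice per allocation,
the new path once with the net delta.

#include <assert.h>
#include <stdio.h>

static long free_pages;	/* stands in for the zone's per-migratetype counter */

/* stands in for account_freepages(); each call is one __mod_zone_page_state() */
static void mock_account(long delta)
{
	free_pages += delta;
}

/* old flow: two counter updates per allocation */
static void alloc_old(int low, int high)
{
	mock_account(-(1L << high));		  /* del_page_from_free_list() */
	mock_account((1L << high) - (1L << low)); /* expand() adds buddies back */
}

/* new flow: one update with the net delta */
static void alloc_new(int low, int high)
{
	long nr_pages = 1L << high;
	long nr_added = (1L << high) - (1L << low); /* what expand() now returns */

	mock_account(-(nr_pages - nr_added));	  /* page_del_and_expand() */
}

int main(void)
{
	long after_old;

	free_pages = 1L << 10;
	alloc_old(2, 5);	/* allocate an order-2 page from an order-5 block */
	after_old = free_pages;

	free_pages = 1L << 10;
	alloc_new(2, 5);
	assert(free_pages == after_old);	/* same result, half the calls */

	printf("free_pages: %ld\n", free_pages);
	return 0;
}
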
> ---
> mm/page_alloc.c | 35 +++++++++++++++++++++++++----------
> 1 file changed, 25 insertions(+), 10 deletions(-)
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 023132f66d29..900b0947c6e1 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -1363,11 +1363,11 @@ struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
> *
> * -- nyc
> */
> -static inline void expand(struct zone *zone, struct page *page,
> - int low, int high, int migratetype)
> +static inline unsigned int expand(struct zone *zone, struct page *page, int low,
> + int high, int migratetype)
> {
> - unsigned long size = 1 << high;
> - unsigned long nr_added = 0;
> + unsigned int size = 1 << high;
> + unsigned int nr_added = 0;
>
> while (high > low) {
> high--;
> @@ -1387,7 +1387,19 @@ static inline void expand(struct zone *zone, struct page *page,
> set_buddy_order(&page[size], high);
> nr_added += size;
> }
> - account_freepages(zone, nr_added, migratetype);
> +
> + return nr_added;
> +}
> +
> +static __always_inline void page_del_and_expand(struct zone *zone,
> + struct page *page, int low,
> + int high, int migratetype)
> +{
> + int nr_pages = 1 << high;
> +
> + __del_page_from_free_list(page, zone, high, migratetype);
> + nr_pages -= expand(zone, page, low, high, migratetype);
> + account_freepages(zone, -nr_pages, migratetype);
> }
>
> static void check_new_page_bad(struct page *page)
> @@ -1557,8 +1569,9 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
> page = get_page_from_free_area(area, migratetype);
> if (!page)
> continue;
> - del_page_from_free_list(page, zone, current_order, migratetype);
> - expand(zone, page, order, current_order, migratetype);
> +
> + page_del_and_expand(zone, page, order, current_order,
> + migratetype);
> trace_mm_page_alloc_zone_locked(page, order, migratetype,
> pcp_allowed_order(order) &&
> migratetype < MIGRATE_PCPTYPES);
> @@ -1888,9 +1901,12 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
>
> /* Take ownership for orders >= pageblock_order */
> if (current_order >= pageblock_order) {
> + unsigned int nr_added;
> +
> del_page_from_free_list(page, zone, current_order, block_type);
> change_pageblock_range(page, current_order, start_type);
> - expand(zone, page, order, current_order, start_type);
> + nr_added = expand(zone, page, order, current_order, start_type);
> + account_freepages(zone, nr_added, start_type);
> return page;
> }
>
> @@ -1943,8 +1959,7 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
> }
>
> single_page:
> - del_page_from_free_list(page, zone, current_order, block_type);
> - expand(zone, page, order, current_order, block_type);
> + page_del_and_expand(zone, page, order, current_order, block_type);
> return page;
> }
>
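
For the record, the net delta has a closed form: expand() puts back
buddies of 2^low + ... + 2^(high-1) = 2^high - 2^low pages, so
page_del_and_expand() always subtracts exactly 1 << low, the size of the
allocated page. A quick user-space check (mock_expand() mirrors the split
loop above, nothing kernel-specific):

#include <assert.h>
#include <stdio.h>

/* same split loop as expand() above, minus the freelist manipulation */
static unsigned int mock_expand(int low, int high)
{
	unsigned int size = 1u << high;
	unsigned int nr_added = 0;

	while (high > low) {
		high--;
		size >>= 1;		/* put back one buddy of 2^high pages */
		nr_added += size;
	}
	return nr_added;
}

int main(void)
{
	for (int high = 0; high <= 10; high++) {
		for (int low = 0; low <= high; low++) {
			int nr_pages = 1 << high;

			nr_pages -= mock_expand(low, high);
			assert(nr_pages == 1 << low);	/* net delta == allocated size */
		}
	}
	printf("net delta is always 1 << low\n");
	return 0;
}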