Date:   Wed, 13 Oct 2021 18:33:06 +0200
From:   David Hildenbrand <david@...hat.com>
To:     Kent Overstreet <kent.overstreet@...il.com>,
        linux-kernel@...r.kernel.org, linux-mm@...ck.org,
        akpm@...ux-foundation.org, linux-raid@...r.kernel.org,
        linux-block@...r.kernel.org, axboe@...nel.dk
Cc:     alexander.h.duyck@...ux.intel.com
Subject: Re: [PATCH 1/5] mm: Make free_area->nr_free per migratetype


Mostly LGTM. I recall that in some corner cases the migratetype stored
for a pcppage does not correspond to the pagetype of the pfnblock ... I
do wonder if that can trick us into getting the accounting wrong here,
now that we account free pages per migratetype.

>  	/*
>  	 * Set the pageblock if the isolated page is at least half of a
> @@ -6038,14 +6038,16 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
>  			struct free_area *area = &zone->free_area[order];
>  			int type;
>  
> -			nr[order] = area->nr_free;
> -			total += nr[order] << order;
> +			nr[order]	= 0;
> +			types[order]	= 0;

Why the indentation change? Looks unrelated to me.

>  
> -			types[order] = 0;
>  			for (type = 0; type < MIGRATE_TYPES; type++) {
>  				if (!free_area_empty(area, type))
>  					types[order] |= 1 << type;
> +				nr[order] += area->nr_free[type];
>  			}
> +
> +			total += nr[order] << order;
>  		}
>  		spin_unlock_irqrestore(&zone->lock, flags);
>  		for (order = 0; order < MAX_ORDER; order++) {
> @@ -6623,7 +6625,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
>  	unsigned int order, t;
>  	for_each_migratetype_order(order, t) {
>  		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
> -		zone->free_area[order].nr_free = 0;
> +		zone->free_area[order].nr_free[t] = 0;
>  	}
>  }
>  
> @@ -9317,6 +9319,7 @@ void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
>  	struct page *page;
>  	struct zone *zone;
>  	unsigned int order;
> +	unsigned int migratetype;
>  	unsigned long flags;
>  
>  	offline_mem_sections(pfn, end_pfn);
> @@ -9346,7 +9349,8 @@ void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
>  		BUG_ON(page_count(page));
>  		BUG_ON(!PageBuddy(page));
>  		order = buddy_order(page);
> -		del_page_from_free_list(page, zone, order);
> +		migratetype = get_pfnblock_migratetype(page, pfn);

As the free pages are isolated, theoretically this should be
MIGRATE_ISOLATE.
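
Might be worth asserting that, something like (untested sketch):

	migratetype = get_pfnblock_migratetype(page, pfn);
	/* The whole range was isolated before offlining it. */
	VM_WARN_ON(!is_migrate_isolate(migratetype));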

> +		del_page_from_free_list(page, zone, order, migratetype);
>  		pfn += (1 << order);
>  	}
>  	spin_unlock_irqrestore(&zone->lock, flags);
> @@ -9428,7 +9432,7 @@ bool take_page_off_buddy(struct page *page)
>  			int migratetype = get_pfnblock_migratetype(page_head,
>  								   pfn_head);
>  
> -			del_page_from_free_list(page_head, zone, page_order);
> +			del_page_from_free_list(page_head, zone, page_order, migratetype);
>  			break_down_buddy_pages(zone, page_head, page, 0,
>  						page_order, migratetype);
>  			if (!is_migrate_isolate(migratetype))
> diff --git a/mm/page_reporting.c b/mm/page_reporting.c
> index 382958eef8..4e45ae95db 100644
> --- a/mm/page_reporting.c
> +++ b/mm/page_reporting.c
> @@ -145,7 +145,7 @@ page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone,
>  	 * The division here should be cheap since PAGE_REPORTING_CAPACITY
>  	 * should always be a power of 2.
>  	 */
> -	budget = DIV_ROUND_UP(area->nr_free, PAGE_REPORTING_CAPACITY * 16);
> +	budget = DIV_ROUND_UP(area->nr_free[mt], PAGE_REPORTING_CAPACITY * 16);
>  

I think we want the total number of free pages (summed over all
migratetypes) here. If we want to change the behavior, that should be
done in a separate patch.
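
I.e., keep budgeting against the total for now, something along these
lines (sketch only, summing the new per-migratetype counters):

	unsigned long nr_free_total = 0;
	int mt;

	for (mt = 0; mt < MIGRATE_TYPES; mt++)
		nr_free_total += area->nr_free[mt];

	budget = DIV_ROUND_UP(nr_free_total, PAGE_REPORTING_CAPACITY * 16);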


-- 
Thanks,

David / dhildenb
