Date:   Fri, 2 Sep 2022 10:20:49 +0200
From:   David Hildenbrand <david@...hat.com>
To:     Kefeng Wang <wangkefeng.wang@...wei.com>,
        Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org
Cc:     Mike Rapoport <rppt@...nel.org>,
        Oscar Salvador <osalvador@...e.de>,
        linux-kernel@...r.kernel.org, Vlastimil Babka <vbabka@...e.cz>
Subject: Re: [PATCH 1/2] mm: reuse pageblock_start/end_pfn() macro

On 02.09.22 08:47, Kefeng Wang wrote:
> Move pageblock_start_pfn/pageblock_end_pfn() into pageblock-flags.h,
> then they could be used somewhere else, not only in compaction.
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@...wei.com>
> ---
>  include/linux/pageblock-flags.h |  2 ++
>  mm/compaction.c                 |  2 --
>  mm/memblock.c                   |  2 +-
>  mm/page_alloc.c                 | 13 ++++++-------
>  mm/page_isolation.c             |  2 +-
>  mm/page_owner.c                 |  4 ++--
>  6 files changed, 12 insertions(+), 13 deletions(-)
> 
> diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
> index 83c7248053a1..ef2e17e312ae 100644
> --- a/include/linux/pageblock-flags.h
> +++ b/include/linux/pageblock-flags.h
> @@ -53,6 +53,8 @@ extern unsigned int pageblock_order;
>  #endif /* CONFIG_HUGETLB_PAGE */
>  
>  #define pageblock_nr_pages	(1UL << pageblock_order)
> +#define pageblock_start_pfn(pfn)	round_down(pfn, pageblock_nr_pages)
> +#define pageblock_end_pfn(pfn)		ALIGN((pfn + 1), pageblock_nr_pages)
>  

I'd naturally have paired ALIGN with ALIGN_DOWN -- or round_up with round_down.
(You're replacing at least one instance where ALIGN_DOWN was used.)

But maybe there is an obvious reason that I am missing :)
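
Just for comparison, the two internally consistent pairings would look
like this (a sketch, using the existing round_down()/round_up() and
ALIGN_DOWN()/ALIGN() helpers):

	#define pageblock_start_pfn(pfn)	round_down(pfn, pageblock_nr_pages)
	#define pageblock_end_pfn(pfn)		round_up((pfn) + 1, pageblock_nr_pages)

or

	#define pageblock_start_pfn(pfn)	ALIGN_DOWN(pfn, pageblock_nr_pages)
	#define pageblock_end_pfn(pfn)		ALIGN((pfn) + 1, pageblock_nr_pages)

For a power-of-two size like pageblock_nr_pages all of these compute
the same values, so it's purely a readability thing.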

>  /* Forward declaration */
>  struct page;
> diff --git a/mm/compaction.c b/mm/compaction.c
> index f72907c7cfef..65bef5f78897 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -52,8 +52,6 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
>  
>  #define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
>  #define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
> -#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
> -#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
>  
>  /*
>   * Page order with-respect-to which proactive compaction
> diff --git a/mm/memblock.c b/mm/memblock.c
> index b5d3026979fc..46fe7575f03c 100644
> --- a/mm/memblock.c
> +++ b/mm/memblock.c
> @@ -2000,7 +2000,7 @@ static void __init free_unused_memmap(void)
>  		 * presume that there are no holes in the memory map inside
>  		 * a pageblock
>  		 */
> -		start = round_down(start, pageblock_nr_pages);
> +		start = pageblock_start_pfn(start);
>  
>  		/*
>  		 * If we had a previous bank, and there is a space
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 36b20215a3be..93339cc61f92 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -544,7 +544,7 @@ static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
>  #ifdef CONFIG_SPARSEMEM
>  	pfn &= (PAGES_PER_SECTION-1);
>  #else
> -	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
> +	pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
>  #endif /* CONFIG_SPARSEMEM */
>  	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
>  }
> @@ -1857,7 +1857,7 @@ void set_zone_contiguous(struct zone *zone)
>  	unsigned long block_start_pfn = zone->zone_start_pfn;
>  	unsigned long block_end_pfn;
>  
> -	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
> +	block_end_pfn = pageblock_end_pfn(block_start_pfn);
>  	for (; block_start_pfn < zone_end_pfn(zone);
>  			block_start_pfn = block_end_pfn,
>  			 block_end_pfn += pageblock_nr_pages) {
> @@ -2653,8 +2653,8 @@ int move_freepages_block(struct zone *zone, struct page *page,
>  		*num_movable = 0;
>  
>  	pfn = page_to_pfn(page);
> -	start_pfn = pfn & ~(pageblock_nr_pages - 1);
> -	end_pfn = start_pfn + pageblock_nr_pages - 1;
> +	start_pfn = pageblock_start_pfn(pfn);
> +	end_pfn = pageblock_end_pfn(pfn) - 1;
>  
>  	/* Do not cross zone boundaries */
>  	if (!zone_spans_pfn(zone, start_pfn))
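
(This hunk is a pure cleanup, for what it's worth: pageblock_nr_pages
is 1UL << pageblock_order, so the old open-coded mask is exactly the
new macro,

	pfn & ~(pageblock_nr_pages - 1) == round_down(pfn, pageblock_nr_pages)

and the old and new end_pfn agree as well.)
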
> @@ -6939,9 +6939,8 @@ static void __init init_unavailable_range(unsigned long spfn,
>  	u64 pgcnt = 0;
>  
>  	for (pfn = spfn; pfn < epfn; pfn++) {
> -		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
> -			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
> -				+ pageblock_nr_pages - 1;
> +		if (!pfn_valid(pageblock_start_pfn(pfn))) {
> +			pfn = pageblock_end_pfn(pfn) - 1;
>  			continue;
>  		}
>  		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
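
(Same here, the skip-ahead stays exact: for any pfn,

	pageblock_end_pfn(pfn) - 1 == ALIGN_DOWN(pfn, pageblock_nr_pages) + pageblock_nr_pages - 1

so the loop behaves as before.)
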
> diff --git a/mm/page_isolation.c b/mm/page_isolation.c
> index 9d73dc38e3d7..f2df4ad53cd6 100644
> --- a/mm/page_isolation.c
> +++ b/mm/page_isolation.c
> @@ -172,7 +172,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
>  	 * to avoid redundant checks.
>  	 */
>  	check_unmovable_start = max(page_to_pfn(page), start_pfn);
> -	check_unmovable_end = min(ALIGN(page_to_pfn(page) + 1, pageblock_nr_pages),
> +	check_unmovable_end = min(pageblock_end_pfn(page_to_pfn(page)),
>  				  end_pfn);
>  


There are some more cases that might need care as well:


mm/memblock.c:          prev_end = ALIGN(end, pageblock_nr_pages);
mm/memblock.c:          prev_end = ALIGN(end, pageblock_nr_pages);
mm/page_isolation.c:    VM_BUG_ON(ALIGN_DOWN(start_pfn, pageblock_nr_pages) !=
mm/page_isolation.c:              ALIGN_DOWN(end_pfn - 1, pageblock_nr_pages));
mm/page_isolation.c:    unsigned long isolate_start = ALIGN_DOWN(start_pfn, pageblock_nr_pages);
mm/page_isolation.c:    unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
mm/page_isolation.c:    unsigned long isolate_start = ALIGN_DOWN(start_pfn, pageblock_nr_pages);
mm/page_isolation.c:    unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
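
Note the off-by-one for the page_isolation.c ones: ALIGN(end_pfn,
pageblock_nr_pages) corresponds to pageblock_end_pfn(end_pfn - 1), not
pageblock_end_pfn(end_pfn). Something like

	unsigned long isolate_start = pageblock_start_pfn(start_pfn);
	unsigned long isolate_end = pageblock_end_pfn(end_pfn - 1);

would keep the values identical; maybe a dedicated pageblock-align
helper would be more readable for these.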



-- 
Thanks,

David / dhildenb
