Date:   Thu, 4 Apr 2019 15:18:00 +0200
From:   David Hildenbrand <david@...hat.com>
To:     Oscar Salvador <osalvador@...e.de>, akpm@...ux-foundation.org
Cc:     mhocko@...e.com, dan.j.williams@...el.com,
        linux-kernel@...r.kernel.org, linux-mm@...ck.org
Subject: Re: [PATCH 1/2] mm, memory_hotplug: cleanup memory offline path

On 04.04.19 14:59, Oscar Salvador wrote:
> From: Michal Hocko <mhocko@...e.com>
> 
> check_pages_isolated_cb currently accounts the whole pfn range as being
> offlined if test_pages_isolated succeeds on the range. This is based on
> the assumption that all pages in the range are freed, which is currently
> true in most cases, but it won't be with later changes, as pages
> marked as vmemmap won't be isolated.
> 
> Move the offlined pages counting to offline_isolated_pages_cb and
> rely on __offline_isolated_pages to return the correct value.
> check_pages_isolated_cb will still do its primary job and check the pfn
> range.
> 
> While we are at it, remove check_pages_isolated and offline_isolated_pages
> and use walk_system_ram_range directly, as we do in online_pages.
> 
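For reference, the resulting flow in __offline_pages() then looks roughly
like this (simplified sketch; error handling, locking and the migration
loop body elided):

	unsigned long offlined_pages = 0;
	int ret;

	do {
		/* ... migrate/dissolve pages in the range ... */

		/* check again */
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
					    NULL, check_pages_isolated_cb);
	} while (ret);

	/* no rollback from here on; count while removing from the buddy */
	walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined_pages,
			      offline_isolated_pages_cb);
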
> Signed-off-by: Michal Hocko <mhocko@...e.com>
> Signed-off-by: Oscar Salvador <osalvador@...e.de>
> ---
>  include/linux/memory_hotplug.h |  3 ++-
>  mm/memory_hotplug.c            | 46 +++++++++++-------------------------------
>  mm/page_alloc.c                | 11 ++++++++--
>  3 files changed, 23 insertions(+), 37 deletions(-)
> 
> diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
> index 8ade08c50d26..3c8cf347804c 100644
> --- a/include/linux/memory_hotplug.h
> +++ b/include/linux/memory_hotplug.h
> @@ -87,7 +87,8 @@ extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
>  extern int online_pages(unsigned long, unsigned long, int);
>  extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
>  	unsigned long *valid_start, unsigned long *valid_end);
> -extern void __offline_isolated_pages(unsigned long, unsigned long);
> +extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
> +						unsigned long end_pfn);
>  
>  typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
>  
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index f206b8b66af1..d8a3e9554aec 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -1451,15 +1451,11 @@ static int
>  offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
>  			void *data)
>  {
> -	__offline_isolated_pages(start, start + nr_pages);
> -	return 0;
> -}
> +	unsigned long offlined_pages;
>  
> -static void
> -offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
> -{
> -	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
> -				offline_isolated_pages_cb);
> +	offlined_pages = __offline_isolated_pages(start, start + nr_pages);
> +	*(unsigned long *)data += offlined_pages;

unsigned long *offlined_pages = data;

*offlined_pages += __offline_isolated_pages(start, start + nr_pages);
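
IOW, the whole callback would then be something like (untested):

static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			  void *data)
{
	/* walk_system_ram_range() hands us &offlined_pages via data */
	unsigned long *offlined_pages = data;

	*offlined_pages += __offline_isolated_pages(start, start + nr_pages);
	return 0;
}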

> +	return 0;
>  }
>  
>  /*
> @@ -1469,26 +1465,7 @@ static int
>  check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
>  			void *data)
>  {
> -	int ret;
> -	long offlined = *(long *)data;
> -	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
> -	offlined = nr_pages;
> -	if (!ret)
> -		*(long *)data += offlined;
> -	return ret;
> -}
> -
> -static long
> -check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
> -{
> -	long offlined = 0;
> -	int ret;
> -
> -	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
> -			check_pages_isolated_cb);
> -	if (ret < 0)
> -		offlined = (long)ret;
> -	return offlined;
> +	return test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
>  }
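
FWIW, returning test_pages_isolated() directly works because
walk_system_ram_range() stops the walk on the first non-zero callback
return value and propagates it; from memory (kernel/resource.c), the core
of its loop is roughly:

	ret = (*func)(pfn, end_pfn - pfn, arg);
	if (ret)
		break;
	...
	return ret;

so the do { ... } while (ret) retry loop below keeps going as long as
some pages in the range are not isolated yet.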
>  
>  static int __init cmdline_parse_movable_node(char *p)
> @@ -1573,7 +1550,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
>  		  unsigned long end_pfn)
>  {
>  	unsigned long pfn, nr_pages;
> -	long offlined_pages;
> +	unsigned long offlined_pages = 0;
>  	int ret, node, nr_isolate_pageblock;
>  	unsigned long flags;
>  	unsigned long valid_start, valid_end;
> @@ -1649,14 +1626,15 @@ static int __ref __offline_pages(unsigned long start_pfn,
>  			goto failed_removal_isolated;
>  		}
>  		/* check again */
> -		offlined_pages = check_pages_isolated(start_pfn, end_pfn);
> -	} while (offlined_pages < 0);
> +		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
> +							check_pages_isolated_cb);

The indentation looks strange, but that might just be my mail client.

> +	} while (ret);
>  
> -	pr_info("Offlined Pages %ld\n", offlined_pages);
>  	/* Ok, all of our target is isolated.
>  	   We cannot do rollback at this point. */
> -	offline_isolated_pages(start_pfn, end_pfn);
> -
> +	walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined_pages,
> +						offline_isolated_pages_cb);

Ditto.

> +	pr_info("Offlined Pages %ld\n", offlined_pages);
>  	/*
>  	 * Onlining will reset pagetype flags and makes migrate type
>  	 * MOVABLE, so just need to decrease the number of isolated
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 0c53807a2943..d36ca67064c9 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -8375,7 +8375,7 @@ void zone_pcp_reset(struct zone *zone)
>   * All pages in the range must be in a single zone and isolated
>   * before calling this.
>   */
> -void
> +unsigned long
>  __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
>  {
>  	struct page *page;
> @@ -8383,12 +8383,15 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
>  	unsigned int order, i;
>  	unsigned long pfn;
>  	unsigned long flags;
> +	unsigned long offlined_pages = 0;
> +
>  	/* find the first valid pfn */
>  	for (pfn = start_pfn; pfn < end_pfn; pfn++)
>  		if (pfn_valid(pfn))
>  			break;
>  	if (pfn == end_pfn)
> -		return;
> +		return offlined_pages;
> +
>  	offline_mem_sections(pfn, end_pfn);
>  	zone = page_zone(pfn_to_page(pfn));
>  	spin_lock_irqsave(&zone->lock, flags);
> @@ -8406,12 +8409,14 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
>  		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
>  			pfn++;
>  			SetPageReserved(page);
> +			offlined_pages++;
>  			continue;
>  		}
>  
>  		BUG_ON(page_count(page));
>  		BUG_ON(!PageBuddy(page));
>  		order = page_order(page);
> +		offlined_pages += 1 << order;
>  #ifdef CONFIG_DEBUG_VM
>  		pr_info("remove from free list %lx %d %lx\n",
>  			pfn, 1 << order, end_pfn);
> @@ -8422,6 +8427,8 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
>  		pfn += (1 << order);
>  	}
>  	spin_unlock_irqrestore(&zone->lock, flags);
> +
> +	return offlined_pages;
>  }
>  #endif
>  
> 


Only nits

Reviewed-by: David Hildenbrand <david@...hat.com>

-- 

Thanks,

David / dhildenb
