Message-ID: <20191128101524.GH26807@dhcp22.suse.cz>
Date:   Thu, 28 Nov 2019 11:15:24 +0100
From:   Michal Hocko <mhocko@...nel.org>
To:     David Hildenbrand <david@...hat.com>
Cc:     linux-kernel@...r.kernel.org, linux-mm@...ck.org,
        Andrew Morton <akpm@...ux-foundation.org>,
        Oscar Salvador <osalvador@...e.de>
Subject: Re: [PATCH v1] mm/memory_hotplug: don't check the nid in
 find_(smallest|biggest)_section_pfn

On Wed 27-11-19 18:41:58, David Hildenbrand wrote:
> Now that we always check against a zone, we can stop checking against
> the nid; it is implicitly covered by the zone.
> 
> Cc: Andrew Morton <akpm@...ux-foundation.org>
> Cc: Michal Hocko <mhocko@...nel.org>
> Cc: Oscar Salvador <osalvador@...e.de>
> Signed-off-by: David Hildenbrand <david@...hat.com>

OK, this makes some sense to me. The node really is superfluous and it
doesn't add any clarity. Quite the contrary, it just raises the question
of why we check it as well. If there ever is a need to check the node,
we have it available in struct zone, and that would be a much more
robust approach because an accidental mismatch between parameters is
ruled out.
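
To spell out the robustness point: with the nid passed separately, a
caller can hand in a value that disagrees with the zone it also passes,
while deriving the nid from the zone rules that mismatch out by
construction. A tiny userspace sketch, where the structs and
zone_to_nid() are toy stand-ins mirroring the kernel's naming rather
than the real definitions:

	#include <stdio.h>

	/* Toy stand-ins for the kernel structures; only the
	 * zone -> node relationship is modeled here. */
	struct pglist_data { int node_id; };
	struct zone { struct pglist_data *zone_pgdat; };

	static int zone_to_nid(const struct zone *zone)
	{
		return zone->zone_pgdat->node_id;
	}

	/* Before the patch: two parameters that must agree; a
	 * careless caller can pass a nid that does not match. */
	static void check_old(int nid, const struct zone *zone)
	{
		if (nid != zone_to_nid(zone))
			printf("mismatch: nid=%d vs zone's nid=%d\n",
			       nid, zone_to_nid(zone));
	}

	/* After the patch: the node is derived from the zone on
	 * demand, so a mismatch is impossible by construction. */
	static void check_new(const struct zone *zone)
	{
		printf("zone's nid=%d\n", zone_to_nid(zone));
	}

	int main(void)
	{
		struct pglist_data node1 = { .node_id = 1 };
		struct zone z = { .zone_pgdat = &node1 };

		check_old(0, &z);	/* buggy caller: stale nid */
		check_new(&z);		/* no separate nid to get wrong */
		return 0;
	}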

Acked-by: Michal Hocko <mhocko@...e.com>

> ---
>  mm/memory_hotplug.c | 23 ++++++++---------------
>  1 file changed, 8 insertions(+), 15 deletions(-)
> 
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index 46b2e056a43f..602f753c662c 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -344,17 +344,14 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
>  }
>  
>  /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
> -static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
> -				     unsigned long start_pfn,
> -				     unsigned long end_pfn)
> +static unsigned long find_smallest_section_pfn(struct zone *zone,
> +					       unsigned long start_pfn,
> +					       unsigned long end_pfn)
>  {
>  	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
>  		if (unlikely(!pfn_to_online_page(start_pfn)))
>  			continue;
>  
> -		if (unlikely(pfn_to_nid(start_pfn) != nid))
> -			continue;
> -
>  		if (zone != page_zone(pfn_to_page(start_pfn)))
>  			continue;
>  
> @@ -365,9 +362,9 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
>  }
>  
>  /* find the biggest valid pfn in the range [start_pfn, end_pfn). */
> -static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
> -				    unsigned long start_pfn,
> -				    unsigned long end_pfn)
> +static unsigned long find_biggest_section_pfn(struct zone *zone,
> +					      unsigned long start_pfn,
> +					      unsigned long end_pfn)
>  {
>  	unsigned long pfn;
>  
> @@ -377,9 +374,6 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
>  		if (unlikely(!pfn_to_online_page(pfn)))
>  			continue;
>  
> -		if (unlikely(pfn_to_nid(pfn) != nid))
> -			continue;
> -
>  		if (zone != page_zone(pfn_to_page(pfn)))
>  			continue;
>  
> @@ -393,7 +387,6 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
>  			     unsigned long end_pfn)
>  {
>  	unsigned long pfn;
> -	int nid = zone_to_nid(zone);
>  
>  	zone_span_writelock(zone);
>  	if (zone->zone_start_pfn == start_pfn) {
> @@ -403,7 +396,7 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
>  		 * In this case, we find second smallest valid mem_section
>  		 * for shrinking zone.
>  		 */
> -		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
> +		pfn = find_smallest_section_pfn(zone, end_pfn,
>  						zone_end_pfn(zone));
>  		if (pfn) {
>  			zone->spanned_pages = zone_end_pfn(zone) - pfn;
> @@ -419,7 +412,7 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
>  		 * In this case, we find second biggest valid mem_section for
>  		 * shrinking zone.
>  		 */
> -		pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
> +		pfn = find_biggest_section_pfn(zone, zone->zone_start_pfn,
>  					       start_pfn);
>  		if (pfn)
>  			zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
> -- 
> 2.21.0

-- 
Michal Hocko
SUSE Labs
