Date:   Wed, 10 Mar 2021 13:04:17 +0200
From:   Shay Agroskin <shayagr@...zon.com>
To:     Mel Gorman <mgorman@...hsingularity.net>
CC:     Andrew Morton <akpm@...ux-foundation.org>,
        Chuck Lever <chuck.lever@...cle.com>,
        Jesper Dangaard Brouer <brouer@...hat.com>,
        LKML <linux-kernel@...r.kernel.org>,
        Linux-Net <netdev@...r.kernel.org>,
        Linux-MM <linux-mm@...ck.org>,
        Linux-NFS <linux-nfs@...r.kernel.org>
Subject: Re: [PATCH 2/5] mm/page_alloc: Add a bulk page allocator


Mel Gorman <mgorman@...hsingularity.net> writes:

>
> diff --git a/include/linux/gfp.h b/include/linux/gfp.h
> index 8572a1474e16..4903d1cc48dc 100644
> --- a/include/linux/gfp.h
> +++ b/include/linux/gfp.h
> @@ -515,6 +515,10 @@ static inline int arch_make_page_accessible(struct page *page)
>  }
>  #endif
>  
> +int __alloc_pages_bulk_nodemask(gfp_t gfp_mask, int preferred_nid,
> +				nodemask_t *nodemask, int nr_pages,
> +				struct list_head *list);
> +
>  struct page *
>  __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
>  							nodemask_t *nodemask);
> @@ -525,6 +529,14 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
>  	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
>  }
>  
> +/* Bulk allocate order-0 pages */
> +static inline unsigned long
> +alloc_pages_bulk(gfp_t gfp_mask, unsigned long nr_pages, struct list_head *list)
> +{
> +	return __alloc_pages_bulk_nodemask(gfp_mask, numa_mem_id(), NULL,
> +							nr_pages, list);

Is the second line's indentation intentional? Why not align it to the
first argument (gfp_mask)?
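
That is, something like:

	return __alloc_pages_bulk_nodemask(gfp_mask, numa_mem_id(), NULL,
					   nr_pages, list);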

> +}
> +
>  /*
>   * Allocate pages, preferring the node given as nid. The node must be valid and
>   * online. For more general interface, see alloc_pages_node().
> @@ -594,6 +606,7 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
>  
>  extern void __free_pages(struct page *page, unsigned int order);
>  extern void free_pages(unsigned long addr, unsigned int order);
> +extern void free_pages_bulk(struct list_head *list);
>  
>  struct page_frag_cache;
>  extern void __page_frag_cache_drain(struct page *page, unsigned int count);
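
BTW, to make sure I'm reading the new API right, here is a minimal
(untested) caller sketch. It assumes the allocated pages come back linked
through page->lru and that free_pages_bulk() takes every page still on
the list:

	LIST_HEAD(page_list);
	unsigned long allocated;

	/* Try to grab up to 16 order-0 pages in a single call. */
	allocated = alloc_pages_bulk(GFP_KERNEL, 16, &page_list);

	/* ... consume the 'allocated' pages linked via page->lru ... */

	/* Return whatever is still on the list to the allocator. */
	free_pages_bulk(&page_list);

Is that the intended usage?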
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 3e4b29ee2b1e..ff1e55793786 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -4436,6 +4436,21 @@ static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
>  	}
>  }
> ...
>  
> +/*
> + * This is a batched version of the page allocator that attempts to
> + * allocate nr_pages quickly from the preferred zone and add them to list.
> + */
> +int __alloc_pages_bulk_nodemask(gfp_t gfp_mask, int preferred_nid,
> +			nodemask_t *nodemask, int nr_pages,
> +			struct list_head *alloc_list)
> +{
> +	struct page *page;
> +	unsigned long flags;
> +	struct zone *zone;
> +	struct zoneref *z;
> +	struct per_cpu_pages *pcp;
> +	struct list_head *pcp_list;
> +	struct alloc_context ac;
> +	gfp_t alloc_mask;
> +	unsigned int alloc_flags;
> +	int alloced = 0;

Does alloced count the number of allocated pages? Do you mind renaming
it to 'allocated'?

> +
> +	if (nr_pages == 1)
> +		goto failed;
> +
> +	/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
> +	if (!prepare_alloc_pages(gfp_mask, 0, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
> +		return 0;
> +	gfp_mask = alloc_mask;
> +
> +	/* Find an allowed local zone that meets the high watermark. */
> +	for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
> +		unsigned long mark;
> +
> +		if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
> +		    !__cpuset_zone_allowed(zone, gfp_mask)) {
> +			continue;
> +		}
> +
> +		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
> +		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
> +			goto failed;
> +		}
> +
> +		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
> +		if (zone_watermark_fast(zone, 0, mark,
> +				zonelist_zone_idx(ac.preferred_zoneref),
> +				alloc_flags, gfp_mask)) {
> +			break;
> +		}
> +	}
> +	if (!zone)
> +		return 0;
> +
> +	/* Attempt the batch allocation */
> +	local_irq_save(flags);
> +	pcp = &this_cpu_ptr(zone->pageset)->pcp;
> +	pcp_list = &pcp->lists[ac.migratetype];
> +
> +	while (alloced < nr_pages) {
> +		page = __rmqueue_pcplist(zone, ac.migratetype, alloc_flags,
> +							pcp, pcp_list);

Same indentation comment as before.
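
That is, presumably:

		page = __rmqueue_pcplist(zone, ac.migratetype, alloc_flags,
					 pcp, pcp_list);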

> +		if (!page)
> +			break;
> +
> +		prep_new_page(page, 0, gfp_mask, 0);
> +		list_add(&page->lru, alloc_list);
> +		alloced++;
> +	}
> +
> +	if (!alloced)
> +		goto failed_irq;
> +
> +	if (alloced) {
> +		__count_zid_vm_events(PGALLOC, zone_idx(zone), alloced);
> +		zone_statistics(zone, zone);
> +	}
> +
> +	local_irq_restore(flags);
> +
> +	return alloced;
> +
> +failed_irq:
> +	local_irq_restore(flags);
> +
> +failed:
> +	page = __alloc_pages_nodemask(gfp_mask, 0, preferred_nid, nodemask);
> +	if (page) {
> +		alloced++;
> +		list_add(&page->lru, alloc_list);
> +	}
> +
> +	return alloced;
> +}
> +EXPORT_SYMBOL_GPL(__alloc_pages_bulk_nodemask);
> +
>  /*
>   * This is the 'heart' of the zoned buddy allocator.
>   */
> @@ -4981,8 +5092,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
>  		return NULL;
>  	}
>  
> -	gfp_mask &= gfp_allowed_mask;
> -	alloc_mask = gfp_mask;
>  	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
>  		return NULL;
