Message-ID: <18c9ab89-185b-395a-97ce-31940388df0e@nvidia.com>
Date:   Thu, 3 Dec 2020 00:17:02 -0800
From:   John Hubbard <jhubbard@...dia.com>
To:     Pavel Tatashin <pasha.tatashin@...een.com>,
        <linux-kernel@...r.kernel.org>, <linux-mm@...ck.org>,
        <akpm@...ux-foundation.org>, <vbabka@...e.cz>, <mhocko@...e.com>,
        <david@...hat.com>, <osalvador@...e.de>,
        <dan.j.williams@...el.com>, <sashal@...nel.org>,
        <tyhicks@...ux.microsoft.com>, <iamjoonsoo.kim@....com>,
        <mike.kravetz@...cle.com>, <rostedt@...dmis.org>,
        <mingo@...hat.com>, <jgg@...pe.ca>, <peterz@...radead.org>,
        <mgorman@...e.de>, <willy@...radead.org>, <rientjes@...gle.com>
Subject: Re: [PATCH 5/6] mm: honor PF_MEMALLOC_NOMOVABLE for all allocations

On 12/1/20 9:23 PM, Pavel Tatashin wrote:
> PF_MEMALLOC_NOMOVABLE is currently honored only for CMA allocations.
> Extend the flag to cover all allocations by removing __GFP_MOVABLE from
> gfp_mask whenever the flag is set in the current context, thus
> prohibiting allocations from ZONE_MOVABLE.
> 
> Signed-off-by: Pavel Tatashin <pasha.tatashin@...een.com>
> ---
>   mm/hugetlb.c    |  2 +-
>   mm/page_alloc.c | 26 ++++++++++++++++----------
>   2 files changed, 17 insertions(+), 11 deletions(-)
> 
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 02213c74ed6b..00e786201d8b 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1036,7 +1036,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
>   	bool nomovable = !!(current->flags & PF_MEMALLOC_NOMOVABLE);
>   
>   	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
> -		if (nomovable && is_migrate_cma_page(page))
> +		if (nomovable && is_migrate_movable(get_pageblock_migratetype(page)))


I wonder if we should add a helper, like is_migrate_cma_page(), that avoids having
to call get_pageblock_migratetype() at all of the callsites?
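
A minimal sketch of what I mean (the name is just a placeholder, untested),
mirroring how is_migrate_cma_page() wraps the migratetype lookup:

static inline bool is_migrate_movable_page(struct page *page)
{
	/* covers both MIGRATE_MOVABLE and MIGRATE_CMA pageblocks */
	return is_migrate_movable(get_pageblock_migratetype(page));
}

With something like that, the hugetlb check above would just read
"if (nomovable && is_migrate_movable_page(page))", and future callers
wouldn't need to open-code the get_pageblock_migratetype() call either.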


>   			continue;
>   
>   		if (PageHWPoison(page))
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 611799c72da5..7a6d86d0bc5f 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -3766,20 +3766,25 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
>   	return alloc_flags;
>   }
>   
> -static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
> -					unsigned int alloc_flags)
> +static inline unsigned int cma_alloc_flags(gfp_t gfp_mask,
> +					   unsigned int alloc_flags)

Actually, maybe the original name should be left intact. This handles the
current task's alloc flags, which right now happen to cover only CMA, so
the original name seems accurate, right?


thanks,

John Hubbard
NVIDIA

>   {
>   #ifdef CONFIG_CMA
> -	unsigned int pflags = current->flags;
> -
> -	if (!(pflags & PF_MEMALLOC_NOMOVABLE) &&
> -	    gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
> +	if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
>   		alloc_flags |= ALLOC_CMA;
> -
>   #endif
>   	return alloc_flags;
>   }
>   
> +static inline gfp_t current_gfp_checkmovable(gfp_t gfp_mask)
> +{
> +	unsigned int pflags = current->flags;
> +
> +	if ((pflags & PF_MEMALLOC_NOMOVABLE))
> +		return gfp_mask & ~__GFP_MOVABLE;
> +	return gfp_mask;
> +}
> +
>   /*
>    * get_page_from_freelist goes through the zonelist trying to allocate
>    * a page.
> @@ -4423,7 +4428,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
>   	} else if (unlikely(rt_task(current)) && !in_interrupt())
>   		alloc_flags |= ALLOC_HARDER;
>   
> -	alloc_flags = current_alloc_flags(gfp_mask, alloc_flags);
> +	alloc_flags = cma_alloc_flags(gfp_mask, alloc_flags);
>   
>   	return alloc_flags;
>   }
> @@ -4725,7 +4730,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>   
>   	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
>   	if (reserve_flags)
> -		alloc_flags = current_alloc_flags(gfp_mask, reserve_flags);
> +		alloc_flags = cma_alloc_flags(gfp_mask, reserve_flags);
>   
>   	/*
>   	 * Reset the nodemask and zonelist iterators if memory policies can be
> @@ -4894,7 +4899,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
>   	if (should_fail_alloc_page(gfp_mask, order))
>   		return false;
>   
> -	*alloc_flags = current_alloc_flags(gfp_mask, *alloc_flags);
> +	*alloc_flags = cma_alloc_flags(gfp_mask, *alloc_flags);
>   
>   	/* Dirty zone balancing only done in the fast path */
>   	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
> @@ -4932,6 +4937,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
>   	}
>   
>   	gfp_mask &= gfp_allowed_mask;
> +	gfp_mask = current_gfp_checkmovable(gfp_mask);
>   	alloc_mask = gfp_mask;
>   	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
>   		return NULL;
> 
