Message-ID: <20200715082812.GD5451@dhcp22.suse.cz>
Date: Wed, 15 Jul 2020 10:28:12 +0200
From: Michal Hocko <mhocko@...nel.org>
To: js1304@...il.com
Cc: Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, kernel-team@....com,
Vlastimil Babka <vbabka@...e.cz>,
Christoph Hellwig <hch@...radead.org>,
Roman Gushchin <guro@...com>,
Mike Kravetz <mike.kravetz@...cle.com>,
Naoya Horiguchi <n-horiguchi@...jp.nec.com>,
"Aneesh Kumar K . V" <aneesh.kumar@...ux.ibm.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>, stable@...r.kernel.org
Subject: Re: [PATCH 1/4] mm/page_alloc: fix non cma alloc context
On Wed 15-07-20 14:05:26, Joonsoo Kim wrote:
> From: Joonsoo Kim <iamjoonsoo.kim@....com>
>
> Currently, excluding the CMA area from page allocation is implemented by
> using current_gfp_context(). However, there are two problems with this
> implementation.
>
> First, this doesn't work for the allocation fastpath. The fastpath uses
> the original gfp_mask, because current_gfp_context() was introduced to
> control reclaim and is therefore only applied on the slowpath.
> Second, clearing __GFP_MOVABLE has the side effect of also excluding
> ZONE_MOVABLE memory from the allocation targets.
This can be a problem especially with movable_node configurations, where
a large portion of the memory is in movable zones.
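
To illustrate for other readers, here is a rough sketch of both issues
(my paraphrase of the current __alloc_pages_nodemask() flow, not part of
the patch):

	/* Fastpath: the original gfp_mask is used as-is, so a task in a
	 * PF_MEMALLOC_NOCMA scope can still be served from CMA here. */
	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
	if (likely(page))
		goto out;

	/* Only the slowpath filters the mask through the PF_MEMALLOC_*
	 * scope, which is where __GFP_MOVABLE used to be cleared... */
	alloc_mask = current_gfp_context(gfp_mask);
	page = __alloc_pages_slowpath(alloc_mask, order, &ac);

	/* ...and clearing __GFP_MOVABLE narrows the eligible zones, not
	 * just the migratetype: */
	gfp_zone(GFP_HIGHUSER_MOVABLE);			/* ZONE_MOVABLE */
	gfp_zone(GFP_HIGHUSER_MOVABLE & ~__GFP_MOVABLE);	/* ZONE_NORMAL */
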
> To fix these problems, this patch changes the implementation to exclude
> the CMA area inside the page allocator itself. The main point of this
> change is using alloc_flags: alloc_flags is the mechanism for controlling
> allocation behaviour, so it is a natural fit for excluding the CMA area
> during allocation.
The approach is sensible and the patch makes sense to me from a quick
glance, but I am not really familiar with all the subtle details of the
cma integration with the allocator, so I do not feel confident enough to
provide my ack.
Thanks!
> Fixes: d7fefcc ("mm/cma: add PF flag to force non cma alloc")
> Cc: <stable@...r.kernel.org>
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@....com>
> ---
>  include/linux/sched/mm.h |  4 ----
>  mm/page_alloc.c          | 27 +++++++++++++++------------
>  2 files changed, 15 insertions(+), 16 deletions(-)
>
> diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
> index 44ad5b7..a73847a 100644
> --- a/include/linux/sched/mm.h
> +++ b/include/linux/sched/mm.h
> @@ -191,10 +191,6 @@ static inline gfp_t current_gfp_context(gfp_t flags)
>  			flags &= ~(__GFP_IO | __GFP_FS);
>  		else if (pflags & PF_MEMALLOC_NOFS)
>  			flags &= ~__GFP_FS;
> -#ifdef CONFIG_CMA
> -		if (pflags & PF_MEMALLOC_NOCMA)
> -			flags &= ~__GFP_MOVABLE;
> -#endif
>  	}
>  	return flags;
>  }
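
Side note for context: PF_MEMALLOC_NOCMA itself is still set and cleared
by the scoped helpers added in d7fefcc; this patch only moves where the
flag is consumed. A minimal caller sketch (illustration only, assuming
the existing memalloc_nocma_save()/memalloc_nocma_restore() API):

	unsigned int flags;

	flags = memalloc_nocma_save();
	/* Allocations in this scope must avoid CMA pageblocks. With this
	 * patch the restriction is enforced via ALLOC_CMA on both the
	 * fast and the slow path, instead of by clearing __GFP_MOVABLE. */
	page = alloc_page(GFP_HIGHUSER_MOVABLE);
	memalloc_nocma_restore(flags);
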
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 6416d08..cd53894 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -2791,7 +2791,7 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
>  		 * allocating from CMA when over half of the zone's free memory
>  		 * is in the CMA area.
>  		 */
> -		if (migratetype == MIGRATE_MOVABLE &&
> +		if (alloc_flags & ALLOC_CMA &&
>  		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
>  		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
>  			page = __rmqueue_cma_fallback(zone, order);
> @@ -2802,7 +2802,7 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
>  retry:
>  	page = __rmqueue_smallest(zone, order, migratetype);
>  	if (unlikely(!page)) {
> -		if (migratetype == MIGRATE_MOVABLE)
> +		if (alloc_flags & ALLOC_CMA)
>  			page = __rmqueue_cma_fallback(zone, order);
> 
>  		if (!page && __rmqueue_fallback(zone, order, migratetype,
> @@ -3502,11 +3502,9 @@ static inline long __zone_watermark_unusable_free(struct zone *z,
>  	if (likely(!alloc_harder))
>  		unusable_free += z->nr_reserved_highatomic;
> 
> -#ifdef CONFIG_CMA
>  	/* If allocation can't use CMA areas don't use free CMA pages */
> -	if (!(alloc_flags & ALLOC_CMA))
> +	if (IS_ENABLED(CONFIG_CMA) && !(alloc_flags & ALLOC_CMA))
>  		unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
> -#endif
> 
>  	return unusable_free;
>  }
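
To make the watermark accounting concrete (hypothetical numbers, just an
illustration): if a zone has 1000 free pages and 600 of them sit in CMA
pageblocks, then for an order-0 request without ALLOC_CMA:

	unusable_free = (1 << 0) - 1 + 600;	/* = 600 */

so only ~400 pages count toward the watermark check, and the request
falls back or reclaims instead of depending on CMA memory it may not
use.
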
> @@ -3693,6 +3691,16 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
>  	return alloc_flags;
>  }
> 
> +static inline void current_alloc_flags(gfp_t gfp_mask,
> +					unsigned int *alloc_flags)
> +{
> +	unsigned int pflags = READ_ONCE(current->flags);
> +
> +	if (!(pflags & PF_MEMALLOC_NOCMA) &&
> +			gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
> +		*alloc_flags |= ALLOC_CMA;
> +}
> +
>  /*
>   * get_page_from_freelist goes through the zonelist trying to allocate
>   * a page.
> @@ -3706,6 +3714,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
>  	struct pglist_data *last_pgdat_dirty_limit = NULL;
>  	bool no_fallback;
> 
> +	current_alloc_flags(gfp_mask, &alloc_flags);
> +
>  retry:
>  	/*
>  	 * Scan zonelist, looking for a zone with enough free.
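
One note on the new helper for readers: the MIGRATE_MOVABLE check in
current_alloc_flags() is derived purely from the gfp bits. Roughly (my
paraphrase of gfp_migratetype() in include/linux/gfp.h):

	/* The migratetype is encoded in the __GFP_MOVABLE /
	 * __GFP_RECLAIMABLE bits of the gfp mask. */
	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;

So ALLOC_CMA is now set iff the request is movable and the task is not
inside a PF_MEMALLOC_NOCMA scope, and because get_page_from_freelist()
is shared by the fastpath and the slowpath, the restriction finally
applies to both.
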
> @@ -4339,10 +4349,6 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
>  	} else if (unlikely(rt_task(current)) && !in_interrupt())
>  		alloc_flags |= ALLOC_HARDER;
> 
> -#ifdef CONFIG_CMA
> -	if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
> -		alloc_flags |= ALLOC_CMA;
> -#endif
>  	return alloc_flags;
>  }
>
> @@ -4808,9 +4814,6 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
>  	if (should_fail_alloc_page(gfp_mask, order))
>  		return false;
> 
> -	if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
> -		*alloc_flags |= ALLOC_CMA;
> -
>  	return true;
>  }
>
> --
> 2.7.4
--
Michal Hocko
SUSE Labs