[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20230823131350.114942-2-alexandru.elisei@arm.com>
Date: Wed, 23 Aug 2023 14:13:14 +0100
From: Alexandru Elisei <alexandru.elisei@....com>
To: catalin.marinas@....com, will@...nel.org, oliver.upton@...ux.dev,
maz@...nel.org, james.morse@....com, suzuki.poulose@....com,
yuzenghui@...wei.com, arnd@...db.de, akpm@...ux-foundation.org,
mingo@...hat.com, peterz@...radead.org, juri.lelli@...hat.com,
vincent.guittot@...aro.org, dietmar.eggemann@....com,
rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
bristot@...hat.com, vschneid@...hat.com, mhiramat@...nel.org,
rppt@...nel.org, hughd@...gle.com
Cc: pcc@...gle.com, steven.price@....com, anshuman.khandual@....com,
vincenzo.frascino@....com, david@...hat.com, eugenis@...gle.com,
kcc@...gle.com, hyesoo.yu@...sung.com,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
kvmarm@...ts.linux.dev, linux-fsdevel@...r.kernel.org,
linux-arch@...r.kernel.org, linux-mm@...ck.org,
linux-trace-kernel@...r.kernel.org
Subject: [PATCH RFC 01/37] mm: page_alloc: Rename gfp_to_alloc_flags_cma -> gfp_to_alloc_flags_fast
gfp_to_alloc_flags_cma() is called on the fast path of the page allocator
and all it does is set the ALLOC_CMA flag if all the conditions are met for
the allocation to be satisfied from the MIGRATE_CMA list. Rename it to be
more generic, as it will soon have to handle another flag.
Signed-off-by: Alexandru Elisei <alexandru.elisei@....com>
---
mm/page_alloc.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7d3460c7a480..e6f950c54494 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3081,7 +3081,7 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
}
/* Must be called after current_gfp_context() which can change gfp_mask */
-static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
+static inline unsigned int gfp_to_alloc_flags_fast(gfp_t gfp_mask,
unsigned int alloc_flags)
{
#ifdef CONFIG_CMA
@@ -3784,7 +3784,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
} else if (unlikely(rt_task(current)) && in_task())
alloc_flags |= ALLOC_MIN_RESERVE;
- alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
+ alloc_flags = gfp_to_alloc_flags_fast(gfp_mask, alloc_flags);
return alloc_flags;
}
@@ -4074,7 +4074,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
if (reserve_flags)
- alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
+ alloc_flags = gfp_to_alloc_flags_fast(gfp_mask, reserve_flags) |
(alloc_flags & ALLOC_KSWAPD);
/*
@@ -4250,7 +4250,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
if (should_fail_alloc_page(gfp_mask, order))
return false;
- *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
+ *alloc_flags = gfp_to_alloc_flags_fast(gfp_mask, *alloc_flags);
/* Dirty zone balancing only done in the fast path */
ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
--
2.41.0
Powered by blists - more mailing lists