Message-ID: <20250429230428.1935619-2-shakeel.butt@linux.dev>
Date: Tue, 29 Apr 2025 16:04:25 -0700
From: Shakeel Butt <shakeel.butt@...ux.dev>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Johannes Weiner <hannes@...xchg.org>,
Michal Hocko <mhocko@...nel.org>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Muchun Song <muchun.song@...ux.dev>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Vlastimil Babka <vbabka@...e.cz>,
linux-mm@...ck.org,
cgroups@...r.kernel.org,
linux-kernel@...r.kernel.org,
Meta kernel team <kernel-team@...a.com>
Subject: [PATCH 1/4] memcg: simplify consume_stock

consume_stock() does not need to check gfp_mask to decide whether it is
allowed to spin: it can simply trylock the local lock and either proceed
or fail based on the result. There is no need to spin on the local lock
at all.
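
Condensed to a sketch (this merely restates the diff below; ret is
initialized to false at this point), the locking in consume_stock() goes
from:

	if (gfpflags_allow_spinning(gfp_mask))
		local_lock_irqsave(&memcg_stock.stock_lock, flags);
	else if (!local_trylock_irqsave(&memcg_stock.stock_lock, flags))
		return ret;

to an unconditional trylock that also folds in the oversized-request
check:

	if (nr_pages > MEMCG_CHARGE_BATCH ||
	    !local_trylock_irqsave(&memcg_stock.stock_lock, flags))
		return ret;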

Signed-off-by: Shakeel Butt <shakeel.butt@...ux.dev>
---
 mm/memcontrol.c | 20 +++++++-------------
 1 file changed, 7 insertions(+), 13 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 650fe4314c39..40d0838d88bc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1804,16 +1804,14 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
  * consume_stock: Try to consume stocked charge on this cpu.
  * @memcg: memcg to consume from.
  * @nr_pages: how many pages to charge.
- * @gfp_mask: allocation mask.
  *
- * The charges will only happen if @memcg matches the current cpu's memcg
- * stock, and at least @nr_pages are available in that stock. Failure to
- * service an allocation will refill the stock.
+ * Consume the cached charge if enough nr_pages are present, otherwise
+ * return failure. Also return failure for a charge request larger than
+ * MEMCG_CHARGE_BATCH or if the local lock is already taken.
  *
  * returns true if successful, false otherwise.
  */
-static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages,
-			  gfp_t gfp_mask)
+static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	struct memcg_stock_pcp *stock;
 	uint8_t stock_pages;
@@ -1821,12 +1819,8 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages,
 	bool ret = false;
 	int i;
 
-	if (nr_pages > MEMCG_CHARGE_BATCH)
-		return ret;
-
-	if (gfpflags_allow_spinning(gfp_mask))
-		local_lock_irqsave(&memcg_stock.stock_lock, flags);
-	else if (!local_trylock_irqsave(&memcg_stock.stock_lock, flags))
+	if (nr_pages > MEMCG_CHARGE_BATCH ||
+	    !local_trylock_irqsave(&memcg_stock.stock_lock, flags))
 		return ret;
 
 	stock = this_cpu_ptr(&memcg_stock);
@@ -2329,7 +2323,7 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	unsigned long pflags;
 
 retry:
-	if (consume_stock(memcg, nr_pages, gfp_mask))
+	if (consume_stock(memcg, nr_pages))
 		return 0;
 
 	if (!gfpflags_allow_spinning(gfp_mask))
--
2.47.1