Date: Fri, 28 Jun 2024 11:39:39 +0200
From: Jesper Dangaard Brouer <hawk@...nel.org>
To: Shakeel Butt <shakeel.butt@...ux.dev>
Cc: tj@...nel.org, cgroups@...r.kernel.org, yosryahmed@...gle.com,
 hannes@...xchg.org, lizefan.x@...edance.com, longman@...hat.com,
 kernel-team@...udflare.com, linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH V4 2/2] cgroup/rstat: Avoid thundering herd problem by
 kswapd across NUMA nodes



On 28/06/2024 01.34, Shakeel Butt wrote:
> On Thu, Jun 27, 2024 at 11:18:56PM GMT, Jesper Dangaard Brouer wrote:
>> Avoid lock contention on the global cgroup rstat lock caused by kswapd
>> starting on all NUMA nodes simultaneously. At Cloudflare, we observed
>> massive issues due to kswapd and the specific mem_cgroup_flush_stats()
>> call inlined in shrink_node, which takes the rstat lock.
>>
>> On our 12 NUMA node machines, each with a kswapd kthread per NUMA node,
>> we noted severe lock contention on the rstat lock. This contention
>> causes 12 CPUs to waste cycles spinning every time kswapd runs.
>> Fleet-wide stats (/proc/N/schedstat) for kthreads revealed that we are
>> burning an average of 20,000 CPU cores fleet-wide on kswapd, primarily
>> due to spinning on the rstat lock.
>>
>> To help the reviewer follow the code: When the Per-CPU-Pages (PCP)
>> freelist is empty,
> 
> Remove the "When the Per-CPU-Pages (PCP) freelist is empty" part, as
> there are a lot more conditions for waking up kswapds, and they do not
> need to be explained here. Just "__alloc_pages_slowpath waking up
> kswapds given the allocation context" or similar text should suffice.
> 

Agree.

>> __alloc_pages_slowpath calls wake_all_kswapds(), causing all
>> kswapdN threads to wake up simultaneously. Each kswapd thread invokes
>> shrink_node (via balance_pgdat), triggering the cgroup rstat flush
>> operation as part of its work. This results in self-induced rstat lock
>> contention in the kernel, because all kswapd threads are woken at once.
>> A relevant detail: balance_pgdat() has a NULL value in
>> target_mem_cgroup, which causes mem_cgroup_flush_stats() to flush with
>> root_mem_cgroup.
>>
>> To avoid this kind of thundering herd problem, the kernel previously had
>> a "stats_flush_ongoing" concept, but this was removed as part of commit
>> 7d7ef0a4686a ("mm: memcg: restore subtree stats flushing"). This patch
>> reintroduces and generalizes the concept to apply to all users of cgroup
>> rstat, not just memcg.
>>
>> If there is an ongoing rstat flush and the current cgroup is a
>> descendant, then it is unnecessary to do the flush. For callers to still
>> see updated stats, wait for the ongoing flusher to complete before
>> returning, but add a timeout, as the stats are already inaccurate given
>> that updaters keep running.
>>
>> Fixes: 7d7ef0a4686a ("mm: memcg: restore subtree stats flushing")
>> Signed-off-by: Jesper Dangaard Brouer <hawk@...nel.org>
>> ---
>> V3: https://lore.kernel.org/all/171943668946.1638606.1320095353103578332.stgit@firesoul/
>> V2: https://lore.kernel.org/all/171923011608.1500238.3591002573732683639.stgit@firesoul/
>> V1: https://lore.kernel.org/all/171898037079.1222367.13467317484793748519.stgit@firesoul/
>> RFC: https://lore.kernel.org/all/171895533185.1084853.3033751561302228252.stgit@firesoul/
>>
>>   include/linux/cgroup-defs.h |    2 +
>>   kernel/cgroup/rstat.c       |   64 ++++++++++++++++++++++++++++++++++++-------
>>   2 files changed, 55 insertions(+), 11 deletions(-)
>>
>> diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
>> index b36690ca0d3f..a33b37514c29 100644
>> --- a/include/linux/cgroup-defs.h
>> +++ b/include/linux/cgroup-defs.h
>> @@ -548,6 +548,8 @@ struct cgroup {
>>   #ifdef CONFIG_BPF_SYSCALL
>>   	struct bpf_local_storage __rcu  *bpf_cgrp_storage;
>>   #endif
>> +	/* completion queue for cgrp_rstat_ongoing_flusher */
>> +	struct completion flush_done;
>>   
>>   	/* All ancestors including self */
>>   	struct cgroup *ancestors[];
>> diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
>> index 2a42be3a9bb3..a98af43bdce7 100644
>> --- a/kernel/cgroup/rstat.c
>> +++ b/kernel/cgroup/rstat.c
>> @@ -2,6 +2,7 @@
>>   #include "cgroup-internal.h"
>>   
>>   #include <linux/sched/cputime.h>
>> +#include <linux/completion.h>
>>   
>>   #include <linux/bpf.h>
>>   #include <linux/btf.h>
>> @@ -11,6 +12,8 @@
>>   
>>   static DEFINE_SPINLOCK(cgroup_rstat_lock);
>>   static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
>> +static struct cgroup *cgrp_rstat_ongoing_flusher = NULL;
>> +static DECLARE_COMPLETION(cgrp_rstat_flusher_done);
> 
> cgrp_rstat_flusher_done is not needed anymore.
> 

True, I already fixed this yesterday when reading the patch email myself.

>>   
>>   static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
>>   
>> @@ -312,6 +315,45 @@ static inline void __cgroup_rstat_unlock(struct cgroup *cgrp, int cpu_in_loop)
>>   	spin_unlock_irq(&cgroup_rstat_lock);
>>   }
>>   
>> +#define MAX_WAIT	msecs_to_jiffies(100)
>> +/* Trylock helper that also checks for an ongoing flusher */
>> +static bool cgroup_rstat_trylock_flusher(struct cgroup *cgrp)
>> +{
>> +	bool locked = __cgroup_rstat_trylock(cgrp, -1);
>> +	if (!locked) {
>> +		struct cgroup *cgrp_ongoing;
>> +
>> +		/* Lock is contended, let's check if an ongoing flusher is already
>> +		 * taking care of this, if we are a descendant.
>> +		 */
>> +		cgrp_ongoing = READ_ONCE(cgrp_rstat_ongoing_flusher);
>> +		if (cgrp_ongoing && cgroup_is_descendant(cgrp, cgrp_ongoing)) {
> 
> I wonder if READ_ONCE() and cgroup_is_descendant() need to happen
> within an RCU section. On a preemptible kernel, let's say we get
> preempted in between them, the flusher was unrelated and got freed
> before we get the CPU back. In that case we are accessing freed memory.
> 

I have to think about this some more.
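
One direction I'm considering (only a rough sketch, and it assumes cgroup
freeing is RCU-delayed so the struct stays valid inside the read-side
section, which is exactly what I need to verify) is to do the check under
rcu_read_lock() and pin the ongoing flusher with cgroup_tryget() before
sleeping on the completion, roughly like this for the !locked branch:

	rcu_read_lock();
	cgrp_ongoing = READ_ONCE(cgrp_rstat_ongoing_flusher);
	if (cgrp_ongoing && cgroup_is_descendant(cgrp, cgrp_ongoing) &&
	    cgroup_tryget(cgrp_ongoing)) {
		/* Cannot sleep under RCU, so hold a ref while waiting */
		rcu_read_unlock();
		wait_for_completion_interruptible_timeout(
			&cgrp_ongoing->flush_done, MAX_WAIT);
		cgroup_put(cgrp_ongoing);
		return false;
	}
	rcu_read_unlock();
	__cgroup_rstat_lock(cgrp, -1, false);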

>> +			wait_for_completion_interruptible_timeout(
>> +				&cgrp_ongoing->flush_done, MAX_WAIT);
>> +
>> +			return false;
>> +		}
>> +		__cgroup_rstat_lock(cgrp, -1, false);
>> +	}
>> +	/* Obtained lock, record this cgrp as the ongoing flusher */
>> +	if (!READ_ONCE(cgrp_rstat_ongoing_flusher)) {
> 
> Can the above condition ever be false?
> 

Yes, I think so, because I realized that cgroup_rstat_flush_locked() can
release/"yield" the lock.  Thus, other CPUs/threads have a chance to
call cgroup_rstat_flush() and try to become the "ongoing flusher".

With this realization, my __cgroup_rstat_trylock() "signal" to detect
ongoing flushers is also not a good signal.  I think we/I should move
the ongoing_flusher detection before attempting to acquire the lock.
If doing so, I'm considering adding a tracepoint after
wait_for_completion() to help us see if the code is behaving as expected
in our prod env.
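
Roughly what I have in mind for the next revision (just a sketch, not
tested; the tracepoint is hypothetical and its name/arguments are not
settled, and the RCU/refcount question above still applies to the
descendant check):

static bool cgroup_rstat_trylock_flusher(struct cgroup *cgrp)
{
	struct cgroup *cgrp_ongoing;

	/* Check for an ongoing flusher *before* contending on the lock */
	cgrp_ongoing = READ_ONCE(cgrp_rstat_ongoing_flusher);
	if (cgrp_ongoing && cgroup_is_descendant(cgrp, cgrp_ongoing)) {
		wait_for_completion_interruptible_timeout(
			&cgrp_ongoing->flush_done, MAX_WAIT);
		/* Hypothetical tracepoint to observe waits in prod */
		trace_cgroup_rstat_wait_for_flusher(cgrp, cgrp_ongoing);
		return false;
	}

	__cgroup_rstat_lock(cgrp, -1, false);

	/* cgroup_rstat_flush_locked() can yield the lock, so an ongoing
	 * flusher may already have registered itself by the time we own
	 * the lock; only take over the role if the slot is free.
	 */
	if (!READ_ONCE(cgrp_rstat_ongoing_flusher)) {
		reinit_completion(&cgrp->flush_done);
		WRITE_ONCE(cgrp_rstat_ongoing_flusher, cgrp);
	}

	return true; /* locked */
}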


>> +		reinit_completion(&cgrp->flush_done);
>> +		WRITE_ONCE(cgrp_rstat_ongoing_flusher, cgrp);
>> +	}
>> +
>> +	return true; /* locked */
>> +}
>> +
>> +static void cgroup_rstat_unlock_flusher(struct cgroup *cgrp)
>> +{
>> +	/* Detect if we are the ongoing flusher */
>> +	if (cgrp == READ_ONCE(cgrp_rstat_ongoing_flusher)) {
> 
> Same.

Same explanation as above.

> 
>> +		WRITE_ONCE(cgrp_rstat_ongoing_flusher, NULL);
>> +		complete_all(&cgrp->flush_done);
>> +	}
>> +	__cgroup_rstat_unlock(cgrp, -1);
>> +}
>> +
>>   /* see cgroup_rstat_flush() */
>>   static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
>>   	__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
>> @@ -361,18 +403,13 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
>>    */
>>   __bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
>>   {
>> -	bool locked;
>> -
>>   	might_sleep();
>>   
>> -	locked = __cgroup_rstat_trylock(cgrp, -1);
>> -	if (!locked) {
>> -		/* Opportunity to ongoing flush detection */
>> -		__cgroup_rstat_lock(cgrp, -1, false);
>> -	}
>> +	if (!cgroup_rstat_trylock_flusher(cgrp))
>> +		return;
>>   
>>   	cgroup_rstat_flush_locked(cgrp);
>> -	__cgroup_rstat_unlock(cgrp, -1);
>> +	cgroup_rstat_unlock_flusher(cgrp);
>>   }
>>   
>>   /**
>> @@ -388,8 +425,11 @@ void cgroup_rstat_flush_hold(struct cgroup *cgrp)
>>   	__acquires(&cgroup_rstat_lock)
>>   {
>>   	might_sleep();
>> -	__cgroup_rstat_lock(cgrp, -1, true);
>> -	cgroup_rstat_flush_locked(cgrp);
>> +
>> +	if (cgroup_rstat_trylock_flusher(cgrp))
>> +		cgroup_rstat_flush_locked(cgrp);
>> +	else
>> +		__cgroup_rstat_lock(cgrp, -1, true);
>>   }
>>   
>>   /**
>> @@ -399,7 +439,7 @@ void cgroup_rstat_flush_hold(struct cgroup *cgrp)
>>   void cgroup_rstat_flush_release(struct cgroup *cgrp)
>>   	__releases(&cgroup_rstat_lock)
>>   {
>> -	__cgroup_rstat_unlock(cgrp, -1);
>> +	cgroup_rstat_unlock_flusher(cgrp);
>>   }
>>   
>>   int cgroup_rstat_init(struct cgroup *cgrp)
>> @@ -421,6 +461,8 @@ int cgroup_rstat_init(struct cgroup *cgrp)
>>   		u64_stats_init(&rstatc->bsync);
>>   	}
>>   
>> +	init_completion(&cgrp->flush_done);
>> +
>>   	return 0;
>>   }
>>   
>>
>>
