Date: Wed, 26 Jun 2024 23:18:03 +0200
From: Jesper Dangaard Brouer <hawk@...nel.org>
To: tj@...nel.org, cgroups@...r.kernel.org, yosryahmed@...gle.com,
 shakeel.butt@...ux.dev
Cc: Jesper Dangaard Brouer <hawk@...nel.org>, hannes@...xchg.org,
 lizefan.x@...edance.com, longman@...hat.com, kernel-team@...udflare.com,
 linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: [PATCH V3 1/2] cgroup/rstat: Helper functions for locking expose
 trylock

Signed-off-by: Jesper Dangaard Brouer <hawk@...nel.org>
---
 kernel/cgroup/rstat.c |   40 ++++++++++++++++++++++++++++++----------
 1 file changed, 30 insertions(+), 10 deletions(-)

diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index fb8b49437573..2a42be3a9bb3 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -279,17 +279,30 @@ __bpf_hook_end();
  * value -1 is used when obtaining the main lock else this is the CPU
  * number processed last.
  */
-static inline void __cgroup_rstat_lock(struct cgroup *cgrp, int cpu_in_loop)
+static inline bool __cgroup_rstat_trylock(struct cgroup *cgrp, int cpu_in_loop)
+{
+	bool locked;
+
+	locked = spin_trylock_irq(&cgroup_rstat_lock);
+	if (!locked)
+		trace_cgroup_rstat_lock_contended(cgrp, cpu_in_loop, !locked);
+
+	return locked;
+}
+
+static inline void __cgroup_rstat_lock(struct cgroup *cgrp, int cpu_in_loop,
+				       bool check_contention)
 	__acquires(&cgroup_rstat_lock)
 {
-	bool contended;
+	bool locked = false;
 
-	contended = !spin_trylock_irq(&cgroup_rstat_lock);
-	if (contended) {
-		trace_cgroup_rstat_lock_contended(cgrp, cpu_in_loop, contended);
+	if (check_contention)
+		locked = __cgroup_rstat_trylock(cgrp, cpu_in_loop);
+
+	if (!locked)
 		spin_lock_irq(&cgroup_rstat_lock);
-	}
-	trace_cgroup_rstat_locked(cgrp, cpu_in_loop, contended);
+
+	trace_cgroup_rstat_locked(cgrp, cpu_in_loop, !locked);
 }
 
 static inline void __cgroup_rstat_unlock(struct cgroup *cgrp, int cpu_in_loop)
@@ -328,7 +341,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
 			__cgroup_rstat_unlock(cgrp, cpu);
 			if (!cond_resched())
 				cpu_relax();
-			__cgroup_rstat_lock(cgrp, cpu);
+			__cgroup_rstat_lock(cgrp, cpu, true);
 		}
 	}
 }
@@ -348,9 +361,16 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
  */
 __bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
 {
+	bool locked;
+
 	might_sleep();
 
-	__cgroup_rstat_lock(cgrp, -1);
+	locked = __cgroup_rstat_trylock(cgrp, -1);
+	if (!locked) {
+		/* Opportunity for ongoing flush detection */
+		__cgroup_rstat_lock(cgrp, -1, false);
+	}
+
 	cgroup_rstat_flush_locked(cgrp);
 	__cgroup_rstat_unlock(cgrp, -1);
 }
@@ -368,7 +388,7 @@ void cgroup_rstat_flush_hold(struct cgroup *cgrp)
 	__acquires(&cgroup_rstat_lock)
 {
 	might_sleep();
-	__cgroup_rstat_lock(cgrp, -1);
+	__cgroup_rstat_lock(cgrp, -1, true);
 	cgroup_rstat_flush_locked(cgrp);
 }
 

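For anyone who wants to play with the trylock-then-lock pattern outside the kernel, below is a minimal userspace sketch of the same idea. It is not kernel code: flush_lock, flush_trylock(), flush_lock_full() and the contended_events counter are illustrative stand-ins for cgroup_rstat_lock and the lock tracepoints, assuming a plain pthread mutex in place of the irq-disabling spinlock.

/* Build: gcc -O2 -pthread trylock_sketch.c -o trylock_sketch */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t flush_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_ulong contended_events;	/* stand-in for the lock_contended tracepoint */

static bool flush_trylock(void)
{
	bool locked = (pthread_mutex_trylock(&flush_lock) == 0);

	if (!locked)
		atomic_fetch_add(&contended_events, 1);
	return locked;
}

static void flush_lock_full(bool check_contention)
{
	bool locked = false;

	if (check_contention)
		locked = flush_trylock();
	if (!locked)
		pthread_mutex_lock(&flush_lock);	/* blocking fallback */
}

static void *flusher(void *arg)
{
	(void)arg;

	/* Mirrors the new cgroup_rstat_flush() flow: probe the lock first. */
	if (!flush_trylock())
		flush_lock_full(false);	/* contention already recorded by the probe */

	usleep(1000);				/* pretend to flush per-CPU stats */
	pthread_mutex_unlock(&flush_lock);
	return NULL;
}

int main(void)
{
	pthread_t threads[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, flusher, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);

	printf("lock was contended %lu time(s)\n",
	       (unsigned long)atomic_load(&contended_events));
	return 0;
}

Each flusher probes the lock first; only a failed probe records the contention event, and the blocking acquisition then happens without re-probing, which is the same split the patch makes between __cgroup_rstat_trylock() and __cgroup_rstat_lock().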