lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250312151634.2183278-17-bigeasy@linutronix.de>
Date: Wed, 12 Mar 2025 16:16:29 +0100
From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To: linux-kernel@...r.kernel.org
Cc: André Almeida <andrealmeid@...lia.com>,
	Darren Hart <dvhart@...radead.org>,
	Davidlohr Bueso <dave@...olabs.net>,
	Ingo Molnar <mingo@...hat.com>,
	Juri Lelli <juri.lelli@...hat.com>,
	Peter Zijlstra <peterz@...radead.org>,
	Thomas Gleixner <tglx@...utronix.de>,
	Valentin Schneider <vschneid@...hat.com>,
	Waiman Long <longman@...hat.com>,
	Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Subject: [PATCH v10 16/21] futex: Remove superfluous state

From: Peter Zijlstra <peterz@...radead.org>

The whole initial_ref_dropped and released state leads to confusing
code.

[bigeasy: use rcuref_is_dead() ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
---
 kernel/futex/core.c  | 116 +++++++++++++++++++++----------------------
 kernel/futex/futex.h |   4 +-
 2 files changed, 58 insertions(+), 62 deletions(-)

diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index 9b87c4f128f14..37c3e020f2f03 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -61,8 +61,6 @@ struct futex_private_hash {
 	rcuref_t	users;
 	unsigned int	hash_mask;
 	struct rcu_head	rcu;
-	bool		initial_ref_dropped;
-	bool		released;
 	struct futex_hash_bucket queues[];
 };
 
@@ -175,49 +173,32 @@ static void futex_rehash_current_users(struct futex_private_hash *old,
 	}
 }
 
-static void futex_assign_new_hash(struct futex_private_hash *new,
-				  struct mm_struct *mm)
+static bool futex_assign_new_hash(struct mm_struct *mm,
+				  struct futex_private_hash *new)
 {
-	bool drop_init_ref = new != NULL;
 	struct futex_private_hash *fph;
 
-	if (!new) {
-		new = mm->futex_phash_new;
-		mm->futex_phash_new = NULL;
-	}
-	/* Someone was quicker, the current mask is valid */
-	if (!new)
-		return;
+	WARN_ON_ONCE(mm->futex_phash_new);
 
-	fph = rcu_dereference_check(mm->futex_phash,
-				     lockdep_is_held(&mm->futex_hash_lock));
+	fph = rcu_dereference_protected(mm->futex_phash,
+					lockdep_is_held(&mm->futex_hash_lock));
 	if (fph) {
 		if (fph->hash_mask >= new->hash_mask) {
 			/* It was increased again while we were waiting */
 			kvfree(new);
-			return;
+			return true;
 		}
-		/*
-		 * If the caller started the resize then the initial reference
-		 * needs to be dropped. If the object can not be deconstructed
-		 * we save new for later and ensure the reference counter
-		 * is not dropped again.
-		 */
-		if (drop_init_ref &&
-		    (fph->initial_ref_dropped || !futex_put_private_hash(fph))) {
+
+		if (!rcuref_is_dead(&fph->users)) {
 			mm->futex_phash_new = new;
-			fph->initial_ref_dropped = true;
-			return;
-		}
-		if (!READ_ONCE(fph->released)) {
-			mm->futex_phash_new = new;
-			return;
+			return false;
 		}
 
 		futex_rehash_current_users(fph, new);
 	}
 	rcu_assign_pointer(mm->futex_phash, new);
 	kvfree_rcu(fph, rcu);
+	return true;
 }
 
 struct futex_private_hash *futex_get_private_hash(void)
@@ -244,11 +225,26 @@ struct futex_private_hash *futex_get_private_hash(void)
 		if (rcuref_get(&fph->users))
 			return fph;
 	}
-	scoped_guard(mutex, &current->mm->futex_hash_lock)
-		futex_assign_new_hash(NULL, mm);
+	scoped_guard (mutex, &mm->futex_hash_lock) {
+		struct futex_private_hash *fph;
+
+		fph = mm->futex_phash_new;
+		if (fph) {
+			mm->futex_phash_new = NULL;
+			futex_assign_new_hash(mm, fph);
+		}
+	}
 	goto again;
 }
 
+void futex_put_private_hash(struct futex_private_hash *fph)
+{
+	/* Ignore return value, last put is verified via rcuref_is_dead() */
+	if (rcuref_put(&fph->users)) {
+		;
+	}
+}
+
 static struct futex_private_hash *futex_get_private_hb(union futex_key *key)
 {
 	if (!futex_key_is_private(key))
@@ -289,17 +285,6 @@ struct futex_hash_bucket *__futex_hash(union futex_key *key)
 }
 
 #ifdef CONFIG_FUTEX_PRIVATE_HASH
-bool futex_put_private_hash(struct futex_private_hash *fph)
-{
-	bool released;
-
-	guard(preempt)();
-	released = rcuref_put_rcusafe(&fph->users);
-	if (released)
-		WRITE_ONCE(fph->released, true);
-	return released;
-}
-
 /**
  * futex_hash_get - Get an additional reference for the local hash.
  * @hb:		    ptr to the private local hash.
@@ -1376,22 +1361,11 @@ void futex_hash_free(struct mm_struct *mm)
 	struct futex_private_hash *fph;
 
 	kvfree(mm->futex_phash_new);
-	/*
-	 * The mm_struct belonging to the task is about to be removed so all
-	 * threads, that ever accessed the private hash, are gone and the
-	 * pointer can be accessed directly (omitting a RCU-read section or
-	 * lock).
-	 * Since there can not be a thread holding a reference to the private
-	 * hash we free it immediately.
-	 */
 	fph = rcu_dereference_raw(mm->futex_phash);
-	if (!fph)
-		return;
-
-	if (!fph->initial_ref_dropped && WARN_ON(!futex_put_private_hash(fph)))
-		return;
-
-	kvfree(fph);
+	if (fph) {
+		WARN_ON_ONCE(rcuref_read(&fph->users) > 1);
+		kvfree(fph);
+	}
 }
 
 static int futex_hash_allocate(unsigned int hash_slots)
@@ -1420,15 +1394,32 @@ static int futex_hash_allocate(unsigned int hash_slots)
 		return -ENOMEM;
 
 	rcuref_init(&fph->users, 1);
-	fph->initial_ref_dropped = false;
-	fph->released = false;
 	fph->hash_mask = hash_slots - 1;
 
 	for (i = 0; i < hash_slots; i++)
 		futex_hash_bucket_init(&fph->queues[i], fph);
 
 	scoped_guard(mutex, &mm->futex_hash_lock) {
+		if (mm->futex_phash && !mm->futex_phash_new) {
+			/*
+			 * If we have an existing hash, but do not yet have
+			 * allocated a replacement hash, drop the initial
+			 * reference on the existing hash.
+			 *
+			 * Ignore the return value; removal is serialized by
+			 * mm->futex_hash_lock which we currently hold and last
+			 * put is verified via rcuref_is_dead().
+			 */
+			if (rcuref_put(&mm->futex_phash->users)) {
+				;
+			}
+		}
+
 		if (mm->futex_phash_new) {
+			/*
+			 * If we already have a replacement hash pending;
+			 * keep the larger hash.
+			 */
 			if (mm->futex_phash_new->hash_mask <= fph->hash_mask) {
 				hb_tofree = mm->futex_phash_new;
 			} else {
@@ -1437,7 +1428,12 @@ static int futex_hash_allocate(unsigned int hash_slots)
 			}
 			mm->futex_phash_new = NULL;
 		}
-		futex_assign_new_hash(fph, mm);
+
+		/*
+		 * Will set mm->futex_phash_new on failure;
+		 * futex_get_private_hash() will try again.
+		 */
+		futex_assign_new_hash(mm, fph);
 	}
 	kvfree(hb_tofree);
 	return 0;
diff --git a/kernel/futex/futex.h b/kernel/futex/futex.h
index 99218d220e534..5b6b58e8a7008 100644
--- a/kernel/futex/futex.h
+++ b/kernel/futex/futex.h
@@ -209,13 +209,13 @@ extern struct futex_hash_bucket *__futex_hash(union futex_key *key);
 extern void futex_hash_get(struct futex_hash_bucket *hb);
 extern void futex_hash_put(struct futex_hash_bucket *hb);
 extern struct futex_private_hash *futex_get_private_hash(void);
-extern bool futex_put_private_hash(struct futex_private_hash *fph);
+extern void futex_put_private_hash(struct futex_private_hash *fph);
 
 #else /* !CONFIG_FUTEX_PRIVATE_HASH */
 static inline void futex_hash_get(struct futex_hash_bucket *hb) { }
 static inline void futex_hash_put(struct futex_hash_bucket *hb) { }
 static inline struct futex_private_hash *futex_get_private_hash(void) { return NULL; }
-static inline bool futex_put_private_hash(struct futex_private_hash *fph) { return false; }
+static inline void futex_put_private_hash(struct futex_private_hash *fph) { }
 #endif
 
 DEFINE_CLASS(hb, struct futex_hash_bucket *,
-- 
2.47.2


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ