lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250312151634.2183278-16-bigeasy@linutronix.de>
Date: Wed, 12 Mar 2025 16:16:28 +0100
From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To: linux-kernel@...r.kernel.org
Cc: André Almeida <andrealmeid@...lia.com>,
	Darren Hart <dvhart@...radead.org>,
	Davidlohr Bueso <dave@...olabs.net>,
	Ingo Molnar <mingo@...hat.com>,
	Juri Lelli <juri.lelli@...hat.com>,
	Peter Zijlstra <peterz@...radead.org>,
	Thomas Gleixner <tglx@...utronix.de>,
	Valentin Schneider <vschneid@...hat.com>,
	Waiman Long <longman@...hat.com>,
	Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Subject: [PATCH v10 15/21] futex: s/hb_p/fph/

From: Peter Zijlstra <peterz@...radead.org>

To me hb_p reads like hash-bucket-private, but these things are
pointers to the private hash table, not to a bucket.

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
---
 kernel/futex/core.c     | 136 ++++++++++++++++++++--------------------
 kernel/futex/futex.h    |   6 +-
 kernel/futex/waitwake.c |   8 +--
 3 files changed, 75 insertions(+), 75 deletions(-)

diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index 229009279ee7d..9b87c4f128f14 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -175,49 +175,49 @@ static void futex_rehash_current_users(struct futex_private_hash *old,
 	}
 }
 
-static void futex_assign_new_hash(struct futex_private_hash *hb_p_new,
+static void futex_assign_new_hash(struct futex_private_hash *new,
 				  struct mm_struct *mm)
 {
-	bool drop_init_ref = hb_p_new != NULL;
-	struct futex_private_hash *hb_p;
+	bool drop_init_ref = new != NULL;
+	struct futex_private_hash *fph;
 
-	if (!hb_p_new) {
-		hb_p_new = mm->futex_phash_new;
+	if (!new) {
+		new = mm->futex_phash_new;
 		mm->futex_phash_new = NULL;
 	}
 	/* Someone was quicker, the current mask is valid */
-	if (!hb_p_new)
+	if (!new)
 		return;
 
-	hb_p = rcu_dereference_check(mm->futex_phash,
+	fph = rcu_dereference_check(mm->futex_phash,
 				     lockdep_is_held(&mm->futex_hash_lock));
-	if (hb_p) {
-		if (hb_p->hash_mask >= hb_p_new->hash_mask) {
+	if (fph) {
+		if (fph->hash_mask >= new->hash_mask) {
 			/* It was increased again while we were waiting */
-			kvfree(hb_p_new);
+			kvfree(new);
 			return;
 		}
 		/*
 		 * If the caller started the resize then the initial reference
 		 * needs to be dropped. If the object can not be deconstructed
-		 * we save hb_p_new for later and ensure the reference counter
+		 * we save new for later and ensure the reference counter
 		 * is not dropped again.
 		 */
 		if (drop_init_ref &&
-		    (hb_p->initial_ref_dropped || !futex_put_private_hash(hb_p))) {
-			mm->futex_phash_new = hb_p_new;
-			hb_p->initial_ref_dropped = true;
+		    (fph->initial_ref_dropped || !futex_put_private_hash(fph))) {
+			mm->futex_phash_new = new;
+			fph->initial_ref_dropped = true;
 			return;
 		}
-		if (!READ_ONCE(hb_p->released)) {
-			mm->futex_phash_new = hb_p_new;
+		if (!READ_ONCE(fph->released)) {
+			mm->futex_phash_new = new;
 			return;
 		}
 
-		futex_rehash_current_users(hb_p, hb_p_new);
+		futex_rehash_current_users(fph, new);
 	}
-	rcu_assign_pointer(mm->futex_phash, hb_p_new);
-	kvfree_rcu(hb_p, rcu);
+	rcu_assign_pointer(mm->futex_phash, new);
+	kvfree_rcu(fph, rcu);
 }
 
 struct futex_private_hash *futex_get_private_hash(void)
@@ -235,14 +235,14 @@ struct futex_private_hash *futex_get_private_hash(void)
 	 */
 again:
 	scoped_guard(rcu) {
-		struct futex_private_hash *hb_p;
+		struct futex_private_hash *fph;
 
-		hb_p = rcu_dereference(mm->futex_phash);
-		if (!hb_p)
+		fph = rcu_dereference(mm->futex_phash);
+		if (!fph)
 			return NULL;
 
-		if (rcuref_get(&hb_p->users))
-			return hb_p;
+		if (rcuref_get(&fph->users))
+			return fph;
 	}
 	scoped_guard(mutex, &current->mm->futex_hash_lock)
 		futex_assign_new_hash(NULL, mm);
@@ -275,12 +275,12 @@ static struct futex_private_hash *futex_get_private_hb(union futex_key *key)
  */
 struct futex_hash_bucket *__futex_hash(union futex_key *key)
 {
-	struct futex_private_hash *hb_p;
+	struct futex_private_hash *fph;
 	u32 hash;
 
-	hb_p = futex_get_private_hb(key);
-	if (hb_p)
-		return futex_hash_private(key, hb_p->queues, hb_p->hash_mask);
+	fph = futex_get_private_hb(key);
+	if (fph)
+		return futex_hash_private(key, fph->queues, fph->hash_mask);
 
 	hash = jhash2((u32 *)key,
 		      offsetof(typeof(*key), both.offset) / 4,
@@ -289,14 +289,14 @@ struct futex_hash_bucket *__futex_hash(union futex_key *key)
 }
 
 #ifdef CONFIG_FUTEX_PRIVATE_HASH
-bool futex_put_private_hash(struct futex_private_hash *hb_p)
+bool futex_put_private_hash(struct futex_private_hash *fph)
 {
 	bool released;
 
 	guard(preempt)();
-	released = rcuref_put_rcusafe(&hb_p->users);
+	released = rcuref_put_rcusafe(&fph->users);
 	if (released)
-		WRITE_ONCE(hb_p->released, true);
+		WRITE_ONCE(fph->released, true);
 	return released;
 }
 
@@ -309,21 +309,21 @@ bool futex_put_private_hash(struct futex_private_hash *hb_p)
  */
 void futex_hash_get(struct futex_hash_bucket *hb)
 {
-	struct futex_private_hash *hb_p = hb->hb_p;
+	struct futex_private_hash *fph = hb->priv;
 
-	if (!hb_p)
+	if (!fph)
 		return;
 
-	WARN_ON_ONCE(!rcuref_get(&hb_p->users));
+	WARN_ON_ONCE(!rcuref_get(&fph->users));
 }
 
 void futex_hash_put(struct futex_hash_bucket *hb)
 {
-	struct futex_private_hash *hb_p = hb->hb_p;
+	struct futex_private_hash *fph = hb->priv;
 
-	if (!hb_p)
+	if (!fph)
 		return;
-	futex_put_private_hash(hb_p);
+	futex_put_private_hash(fph);
 }
 #endif
 
@@ -1167,7 +1167,7 @@ static void compat_exit_robust_list(struct task_struct *curr)
 static void exit_pi_state_list(struct task_struct *curr)
 {
 	struct list_head *next, *head = &curr->pi_state_list;
-	struct futex_private_hash *hb_p;
+	struct futex_private_hash *fph;
 	struct futex_pi_state *pi_state;
 	union futex_key key = FUTEX_KEY_INIT;
 
@@ -1181,7 +1181,7 @@ static void exit_pi_state_list(struct task_struct *curr)
 	 * on the mutex.
 	 */
 	WARN_ON(curr != current);
-	hb_p = futex_get_private_hash();
+	fph = futex_get_private_hash();
 	/*
 	 * We are a ZOMBIE and nobody can enqueue itself on
 	 * pi_state_list anymore, but we have to be careful
@@ -1244,8 +1244,8 @@ static void exit_pi_state_list(struct task_struct *curr)
 		raw_spin_lock_irq(&curr->pi_lock);
 	}
 	raw_spin_unlock_irq(&curr->pi_lock);
-	if (hb_p)
-		futex_put_private_hash(hb_p);
+	if (fph)
+		futex_put_private_hash(fph);
 }
 #else
 static inline void exit_pi_state_list(struct task_struct *curr) { }
@@ -1360,10 +1360,10 @@ void futex_exit_release(struct task_struct *tsk)
 }
 
 static void futex_hash_bucket_init(struct futex_hash_bucket *fhb,
-				   struct futex_private_hash *hb_p)
+				   struct futex_private_hash *fph)
 {
 #ifdef CONFIG_FUTEX_PRIVATE_HASH
-	fhb->hb_p = hb_p;
+	fhb->priv = fph;
 #endif
 	atomic_set(&fhb->waiters, 0);
 	plist_head_init(&fhb->chain);
@@ -1373,7 +1373,7 @@ static void futex_hash_bucket_init(struct futex_hash_bucket *fhb,
 #ifdef CONFIG_FUTEX_PRIVATE_HASH
 void futex_hash_free(struct mm_struct *mm)
 {
-	struct futex_private_hash *hb_p;
+	struct futex_private_hash *fph;
 
 	kvfree(mm->futex_phash_new);
 	/*
@@ -1384,19 +1384,19 @@ void futex_hash_free(struct mm_struct *mm)
 	 * Since there can not be a thread holding a reference to the private
 	 * hash we free it immediately.
 	 */
-	hb_p = rcu_dereference_raw(mm->futex_phash);
-	if (!hb_p)
+	fph = rcu_dereference_raw(mm->futex_phash);
+	if (!fph)
 		return;
 
-	if (!hb_p->initial_ref_dropped && WARN_ON(!futex_put_private_hash(hb_p)))
+	if (!fph->initial_ref_dropped && WARN_ON(!futex_put_private_hash(fph)))
 		return;
 
-	kvfree(hb_p);
+	kvfree(fph);
 }
 
 static int futex_hash_allocate(unsigned int hash_slots)
 {
-	struct futex_private_hash *hb_p, *hb_tofree = NULL;
+	struct futex_private_hash *fph, *hb_tofree = NULL;
 	struct mm_struct *mm = current->mm;
 	size_t alloc_size;
 	int i;
@@ -1415,29 +1415,29 @@ static int futex_hash_allocate(unsigned int hash_slots)
 					&alloc_size)))
 		return -ENOMEM;
 
-	hb_p = kvmalloc(alloc_size, GFP_KERNEL_ACCOUNT);
-	if (!hb_p)
+	fph = kvmalloc(alloc_size, GFP_KERNEL_ACCOUNT);
+	if (!fph)
 		return -ENOMEM;
 
-	rcuref_init(&hb_p->users, 1);
-	hb_p->initial_ref_dropped = false;
-	hb_p->released = false;
-	hb_p->hash_mask = hash_slots - 1;
+	rcuref_init(&fph->users, 1);
+	fph->initial_ref_dropped = false;
+	fph->released = false;
+	fph->hash_mask = hash_slots - 1;
 
 	for (i = 0; i < hash_slots; i++)
-		futex_hash_bucket_init(&hb_p->queues[i], hb_p);
+		futex_hash_bucket_init(&fph->queues[i], fph);
 
 	scoped_guard(mutex, &mm->futex_hash_lock) {
 		if (mm->futex_phash_new) {
-			if (mm->futex_phash_new->hash_mask <= hb_p->hash_mask) {
+			if (mm->futex_phash_new->hash_mask <= fph->hash_mask) {
 				hb_tofree = mm->futex_phash_new;
 			} else {
-				hb_tofree = hb_p;
-				hb_p = mm->futex_phash_new;
+				hb_tofree = fph;
+				fph = mm->futex_phash_new;
 			}
 			mm->futex_phash_new = NULL;
 		}
-		futex_assign_new_hash(hb_p, mm);
+		futex_assign_new_hash(fph, mm);
 	}
 	kvfree(hb_tofree);
 	return 0;
@@ -1446,16 +1446,16 @@ static int futex_hash_allocate(unsigned int hash_slots)
 int futex_hash_allocate_default(void)
 {
 	unsigned int threads, buckets, current_buckets = 0;
-	struct futex_private_hash *hb_p;
+	struct futex_private_hash *fph;
 
 	if (!current->mm)
 		return 0;
 
 	scoped_guard(rcu) {
 		threads = min_t(unsigned int, get_nr_threads(current), num_online_cpus());
-		hb_p = rcu_dereference(current->mm->futex_phash);
-		if (hb_p)
-			current_buckets = hb_p->hash_mask + 1;
+		fph = rcu_dereference(current->mm->futex_phash);
+		if (fph)
+			current_buckets = fph->hash_mask + 1;
 	}
 
 	/*
@@ -1473,12 +1473,12 @@ int futex_hash_allocate_default(void)
 
 static int futex_hash_get_slots(void)
 {
-	struct futex_private_hash *hb_p;
+	struct futex_private_hash *fph;
 
 	guard(rcu)();
-	hb_p = rcu_dereference(current->mm->futex_phash);
-	if (hb_p)
-		return hb_p->hash_mask + 1;
+	fph = rcu_dereference(current->mm->futex_phash);
+	if (fph)
+		return fph->hash_mask + 1;
 	return 0;
 }
 
diff --git a/kernel/futex/futex.h b/kernel/futex/futex.h
index 782021feffe2e..99218d220e534 100644
--- a/kernel/futex/futex.h
+++ b/kernel/futex/futex.h
@@ -118,7 +118,7 @@ struct futex_hash_bucket {
 	atomic_t waiters;
 	spinlock_t lock;
 	struct plist_head chain;
-	struct futex_private_hash *hb_p;
+	struct futex_private_hash *priv;
 } ____cacheline_aligned_in_smp;
 
 /*
@@ -209,13 +209,13 @@ extern struct futex_hash_bucket *__futex_hash(union futex_key *key);
 extern void futex_hash_get(struct futex_hash_bucket *hb);
 extern void futex_hash_put(struct futex_hash_bucket *hb);
 extern struct futex_private_hash *futex_get_private_hash(void);
-extern bool futex_put_private_hash(struct futex_private_hash *hb_p);
+extern bool futex_put_private_hash(struct futex_private_hash *fph);
 
 #else /* !CONFIG_FUTEX_PRIVATE_HASH */
 static inline void futex_hash_get(struct futex_hash_bucket *hb) { }
 static inline void futex_hash_put(struct futex_hash_bucket *hb) { }
 static inline struct futex_private_hash *futex_get_private_hash(void) { return NULL; }
-static inline bool futex_put_private_hash(struct futex_private_hash *hb_p) { return false; }
+static inline bool futex_put_private_hash(struct futex_private_hash *fph) { return false; }
 #endif
 
 DEFINE_CLASS(hb, struct futex_hash_bucket *,
diff --git a/kernel/futex/waitwake.c b/kernel/futex/waitwake.c
index 67eebb5b4b212..0d150453a0b41 100644
--- a/kernel/futex/waitwake.c
+++ b/kernel/futex/waitwake.c
@@ -493,7 +493,7 @@ static int __futex_wait_multiple_setup(struct futex_vector *vs, int count, int *
 
 int futex_wait_multiple_setup(struct futex_vector *vs, int count, int *woken)
 {
-	struct futex_private_hash *hb_p;
+	struct futex_private_hash *fph;
 	int ret;
 
 	/*
@@ -501,10 +501,10 @@ int futex_wait_multiple_setup(struct futex_vector *vs, int count, int *woken)
 	 * hash to avoid blocking on mm_struct::futex_hash_bucket during rehash
 	 * after changing the task state.
 	 */
-	hb_p = futex_get_private_hash();
+	fph = futex_get_private_hash();
 	ret = __futex_wait_multiple_setup(vs, count, woken);
-	if (hb_p)
-		futex_put_private_hash(hb_p);
+	if (fph)
+		futex_put_private_hash(fph);
 	return ret;
 }
 
-- 
2.47.2


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ