Message-Id: <20241219-b4-ovpn-v16-6-3e3001153683@openvpn.net>
Date: Thu, 19 Dec 2024 02:42:00 +0100
From: Antonio Quartulli <antonio@...nvpn.net>
To: netdev@...r.kernel.org, Eric Dumazet <edumazet@...gle.com>, 
 Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>, 
 Donald Hunter <donald.hunter@...il.com>, 
 Antonio Quartulli <antonio@...nvpn.net>, Shuah Khan <shuah@...nel.org>, 
 sd@...asysnail.net, ryazanov.s.a@...il.com, 
 Andrew Lunn <andrew+netdev@...n.ch>
Cc: Simon Horman <horms@...nel.org>, linux-kernel@...r.kernel.org, 
 linux-kselftest@...r.kernel.org, Xiao Liang <shaw.leon@...il.com>, 
 Will Deacon <will@...nel.org>, Peter Zijlstra <peterz@...radead.org>, 
 Boqun Feng <boqun.feng@...il.com>, Mark Rutland <mark.rutland@....com>, 
 Andrew Morton <akpm@...ux-foundation.org>
Subject: [PATCH net-next v16 06/26] kref/refcount: implement
 kref_put_sock()

Similarly to kref_put_lock(), decrease the refcount
and call bh_lock_sock(sk) if it reaches 0.

This kref_put variant comes in handy when there is a need
to atomically clean up a socket context along with dropping
the refcount to 0.
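
A minimal usage sketch (illustrative only, not part of this patch;
my_peer, my_peer_put and the sk_user_data handling are hypothetical):
as with kref_put_lock(), the release callback runs with the socket
bh-locked and is responsible for unlocking it.

	#include <linux/kref.h>
	#include <linux/slab.h>
	#include <net/sock.h>

	struct my_peer {
		struct kref ref;
		struct sock *sk;
		/* ... other per-peer state ... */
	};

	static void my_peer_release(struct kref *kref)
	{
		struct my_peer *peer = container_of(kref, struct my_peer, ref);

		/* peer->sk is bh-locked here, so detaching the context
		 * is atomic with the refcount dropping to 0.
		 */
		peer->sk->sk_user_data = NULL;
		bh_unlock_sock(peer->sk);

		kfree(peer);
	}

	static void my_peer_put(struct my_peer *peer)
	{
		/* invokes my_peer_release() under bh_lock_sock() and
		 * returns 1 only when this was the last reference
		 */
		kref_put_sock(&peer->ref, my_peer_release, peer->sk);
	}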

Cc: Will Deacon <will@...nel.org> (maintainer:ATOMIC INFRASTRUCTURE)
Cc: Peter Zijlstra <peterz@...radead.org> (maintainer:ATOMIC INFRASTRUCTURE)
Cc: Boqun Feng <boqun.feng@...il.com> (reviewer:ATOMIC INFRASTRUCTURE)
Cc: Mark Rutland <mark.rutland@....com> (reviewer:ATOMIC INFRASTRUCTURE)
Cc: Andrew Morton <akpm@...ux-foundation.org>
Signed-off-by: Antonio Quartulli <antonio@...nvpn.net>
---
 include/linux/kref.h     | 11 +++++++++++
 include/linux/refcount.h |  3 +++
 lib/refcount.c           | 32 ++++++++++++++++++++++++++++++++
 3 files changed, 46 insertions(+)

diff --git a/include/linux/kref.h b/include/linux/kref.h
index d32e21a2538c292452db99b915b1bb6c3ab15e53..68b11b0c9c0fdd17cf706e0eb15f74cfe2efa178 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -90,6 +90,17 @@ static inline int kref_put_lock(struct kref *kref,
 	return 0;
 }
 
+static inline int kref_put_sock(struct kref *kref,
+				void (*release)(struct kref *kref),
+				struct sock *sock)
+{
+	if (refcount_dec_and_lock_sock(&kref->refcount, sock)) {
+		release(kref);
+		return 1;
+	}
+	return 0;
+}
+
 /**
  * kref_get_unless_zero - Increment refcount for object unless it is zero.
  * @kref: object.
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 35f039ecb2725618ca098e3515c6e19e2aece3ee..22698db1f24fdaf884a508b8a0cd44eb62194a9f 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -100,6 +100,7 @@
 #include <linux/spinlock_types.h>
 
 struct mutex;
+struct sock;
 
 #define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
 #define REFCOUNT_MAX		INT_MAX
@@ -358,4 +359,6 @@ extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
 						       spinlock_t *lock,
 						       unsigned long *flags) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_lock_sock(refcount_t *r,
+						    struct sock *sock);
 #endif /* _LINUX_REFCOUNT_H */
diff --git a/lib/refcount.c b/lib/refcount.c
index a207a8f22b3ca35890671e51c480266d89e4d8d6..76a728581aa49a41ef13f5141f3f2e9816d72e75 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -7,6 +7,7 @@
 #include <linux/refcount.h>
 #include <linux/spinlock.h>
 #include <linux/bug.h>
+#include <net/sock.h>
 
 #define REFCOUNT_WARN(str)	WARN_ONCE(1, "refcount_t: " str ".\n")
 
@@ -156,6 +157,37 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 }
 EXPORT_SYMBOL(refcount_dec_and_lock);
 
+/**
+ * refcount_dec_and_lock_sock - return holding locked sock if able to decrement
+ *				refcount to 0
+ * @r: the refcount
+ * @sock: the sock to be locked
+ *
+ * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
+ * decrement when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Return: true and hold sock if able to decrement refcount to 0, false
+ *	   otherwise
+ */
+bool refcount_dec_and_lock_sock(refcount_t *r, struct sock *sock)
+{
+	if (refcount_dec_not_one(r))
+		return false;
+
+	bh_lock_sock(sock);
+	if (!refcount_dec_and_test(r)) {
+		bh_unlock_sock(sock);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lock_sock);
+
 /**
  * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
  *                                 interrupts if able to decrement refcount to 0

-- 
2.45.2

