Message-ID: <20230526205855.587341916@infradead.org>
Date:   Fri, 26 May 2023 22:52:05 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     torvalds@...ux-foundation.org, keescook@...omium.org,
        gregkh@...uxfoundation.org, pbonzini@...hat.com
Cc:     linux-kernel@...r.kernel.org, ojeda@...nel.org,
        ndesaulniers@...gle.com, peterz@...radead.org, mingo@...hat.com,
        will@...nel.org, longman@...hat.com, boqun.feng@...il.com,
        juri.lelli@...hat.com, vincent.guittot@...aro.org,
        dietmar.eggemann@....com, rostedt@...dmis.org, bsegall@...gle.com,
        mgorman@...e.de, bristot@...hat.com, vschneid@...hat.com,
        paulmck@...nel.org, frederic@...nel.org, quic_neeraju@...cinc.com,
        joel@...lfernandes.org, josh@...htriplett.org,
        mathieu.desnoyers@...icios.com, jiangshanlai@...il.com,
        rcu@...r.kernel.org, tj@...nel.org, tglx@...utronix.de
Subject: [PATCH v2 1/2] locking: Introduce __cleanup__ based guards

Use __attribute__((__cleanup__(func))) to build pointer and lock
guards.

Actual usage is in the next patch.
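
For reference, the __cleanup__ attribute arranges for the named function
to be called with a pointer to the variable when the variable goes out
of scope, on every exit path. A minimal sketch of the bare mechanism
(illustration only, not part of this patch; the helper and 'buf' are
made up, and <linux/slab.h> is assumed for kmalloc()/kfree()):

	static void free_buf(void **p)
	{
		kfree(*p);	/* kfree(NULL) is a no-op */
	}

	void foo(void)
	{
		void *buf __cleanup(free_buf) = kmalloc(64, GFP_KERNEL);

		if (!buf)
			return;	/* free_buf(&buf) runs here ... */

		/* ... use buf ... */
	}			/* ... and here */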

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 include/linux/compiler_attributes.h |    6 +
 include/linux/guards.h              |  142 ++++++++++++++++++++++++++++++++++++
 include/linux/irqflags.h            |    7 +
 include/linux/mutex.h               |    5 +
 include/linux/preempt.h             |    4 +
 include/linux/rcupdate.h            |    3 
 include/linux/sched/task.h          |    2 
 include/linux/spinlock.h            |   27 ++++++
 scripts/checkpatch.pl               |    2 
 9 files changed, 197 insertions(+), 1 deletion(-)
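
Since the actual conversions only appear in the next patch, here is an
illustrative sketch of how the constructs defined below are meant to be
used ('pid', 'my_mutex' and the error handling are made up for the
example; find_get_task_by_vpid() just stands in for any lookup that
returns a reference):

	/* pointer guard: put_task_struct() when 'p' leaves scope, if !NULL */
	ptr_guard(put_task, p) = find_get_task_by_vpid(pid);
	if (!p)
		return -ESRCH;

	/* named lock guard: mutex_unlock() at the end of the enclosing scope */
	guard(mutex, g, &my_mutex);

	/* guard with an extra member carrying the saved flags */
	guard(irqsave, irq);

	/* explicit scope: rcu_read_unlock() when the block is left */
	scoped (rcu) {
		/* read-side critical section */
	}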

--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -77,6 +77,12 @@
 #define __attribute_const__             __attribute__((__const__))
 
 /*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-cleanup-variable-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#cleanup
+ */
+#define __cleanup(func)			__attribute__((__cleanup__(func)))
+
+/*
  * Optional: only supported since gcc >= 9
  * Optional: not supported by clang
  *
--- /dev/null
+++ b/include/linux/guards.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GUARDS_H
+#define __LINUX_GUARDS_H
+
+#include <linux/compiler_attributes.h>
+
+/*
+ * Pointer Guards are special pointer variables with a scope-bound cleanup
+ * function.
+ *
+ * Various guard types can be created using:
+ *
+ *   DEFINE_PTR_GUARD(guard_type, typename, cleanup-exp)
+ *
+ * After which they can be used like so:
+ *
+ *   ptr_guard(guard_type, name) = find_get_object(foo);
+ *
+ * Where the return type of find_get_object() should match the guard_type's
+ * 'typename *'. When @name goes out of scope, cleanup-exp is run by the
+ * compiler if @name is not NULL at that time. Also see the __cleanup attribute.
+ */
+
+#define DEFINE_PTR_GUARD(_type, _Type, _Put)					\
+typedef _Type *ptr_guard_##_type##_t;						\
+										\
+static inline void ptr_guard_##_type##_cleanup(_Type **_ptr)			\
+{										\
+	_Type *_G = *_ptr;							\
+	if (_G)									\
+		_Put(_G);							\
+}
+
+#define ptr_guard(_type, _name)							\
+	ptr_guard_##_type##_t _name __cleanup(ptr_guard_##_type##_cleanup)
+
+
+/*
+ * Lock Guards are like the pointer guards above except they also have
+ * a fixed initializer to cover both the Lock and Unlock of the lock type.
+ *
+ * Lock guards types can be created using:
+ *
+ *   DEFINE_LOCK_GUARD_0(guard_type, Lock, Unlock, [extra guard members])
+ *   DEFINE_LOCK_GUARD_1(guard_type, typename, Lock, Unlock, ...)
+ *   DEFINE_LOCK_GUARD_2(guard_type, typename, Lock, Unlock, ...)
+ *
+ * Where the _n suffix indicates the number of 'typename *' arguments the
+ * Lock function requires.
+ *
+ * Once defined, the lock guards can be used in one of two ways:
+ *
+ *	guard(guard_type, name, var...);
+ *
+ * or:
+ *
+ *	scoped (guard_type, var...) {
+ *		...
+ *	}
+ *
+ * The first creates a named variable that is initialized with the Lock
+ * function and will call the Unlock function when it goes out of scope.
+ *
+ * The second creates an explicit scope, using a for-loop with an implicitly
+ * declared variable named _scope. Again, Lock is called before the scope is
+ * entered and Unlock will be called when the scope is left.
+ *
+ * Both Lock and Unlock are expressions and can access the guard object through
+ * the _G pointer. The guard object will have _n implicit members of type
+ * 'typename *', named 'lock' and 'lock2', as well as any additional members
+ * specified in the definition.
+ */
+
+#define DEFINE_LOCK_GUARD_0(_type, _Lock, _Unlock, ...)				\
+typedef struct {								\
+	__VA_ARGS__;								\
+} lock_guard_##_type##_t;							\
+										\
+static inline void lock_guard_##_type##_cleanup(lock_guard_##_type##_t *_G)	\
+{										\
+	_Unlock;								\
+}										\
+										\
+static inline lock_guard_##_type##_t lock_guard_##_type##_init(void)		\
+{										\
+	lock_guard_##_type##_t _g = { }, *_G __maybe_unused = &_g;		\
+	_Lock;									\
+	return _g;								\
+}
+
+#define DEFINE_LOCK_GUARD_1(_type, _Type, _Lock, _Unlock, ...)			\
+typedef struct {								\
+	_Type *lock;								\
+	__VA_ARGS__;								\
+} lock_guard_##_type##_t;							\
+										\
+static inline void lock_guard_##_type##_cleanup(lock_guard_##_type##_t *_G)	\
+{										\
+	_Unlock;								\
+}										\
+										\
+static inline lock_guard_##_type##_t lock_guard_##_type##_init(_Type *lock)	\
+{										\
+	lock_guard_##_type##_t _g = { .lock = lock }, *_G = &_g;		\
+	_Lock;									\
+	return _g;								\
+}
+
+#define DEFINE_LOCK_GUARD_2(_type, _Type, _Lock, _Unlock, ...)			\
+typedef struct {								\
+	_Type *lock;								\
+	_Type *lock2;								\
+	__VA_ARGS__;								\
+} lock_guard_##_type##_t;							\
+										\
+static inline void lock_guard_##_type##_cleanup(lock_guard_##_type##_t *_G)	\
+{										\
+	_Unlock;								\
+}										\
+										\
+static inline lock_guard_##_type##_t						\
+lock_guard_##_type##_init(_Type *lock, _Type *lock2)				\
+{										\
+	lock_guard_##_type##_t _g = { .lock = lock, .lock2 = lock2 }, *_G = &_g;\
+	_Lock;									\
+	return _g;								\
+}
+
+#define variable_scope(_type, _enter, _exit)					\
+	for (_type *_done = NULL, _scope __cleanup(_exit) = _enter;		\
+	     !_done; _done = (void *)8)
+
+#define scoped(_type, _var...)							\
+	variable_scope(lock_guard_##_type##_t,					\
+		       lock_guard_##_type##_init(_var),				\
+		       lock_guard_##_type##_cleanup)
+
+#define guard(_type, _name, _var...)						\
+	lock_guard_##_type##_t __cleanup(lock_guard_##_type##_cleanup) _name =	\
+		lock_guard_##_type##_init(_var)
+
+#endif /* __LINUX_GUARDS_H */
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -13,6 +13,7 @@
 #define _LINUX_TRACE_IRQFLAGS_H
 
 #include <linux/typecheck.h>
+#include <linux/guards.h>
 #include <asm/irqflags.h>
 #include <asm/percpu.h>
 
@@ -267,4 +268,10 @@ extern void warn_bogus_irq_restore(void)
 
 #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
 
+DEFINE_LOCK_GUARD_0(irq, local_irq_disable(), local_irq_enable())
+DEFINE_LOCK_GUARD_0(irqsave,
+		    local_irq_save(_G->flags),
+		    local_irq_restore(_G->flags),
+		    unsigned long flags)
+
 #endif
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -19,6 +19,7 @@
 #include <asm/processor.h>
 #include <linux/osq_lock.h>
 #include <linux/debug_locks.h>
+#include <linux/guards.h>
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define __DEP_MAP_MUTEX_INITIALIZER(lockname)			\
@@ -219,4 +220,8 @@ extern void mutex_unlock(struct mutex *l
 
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
+DEFINE_LOCK_GUARD_1(mutex, struct mutex,
+		    mutex_lock(_G->lock),
+		    mutex_unlock(_G->lock))
+
 #endif /* __LINUX_MUTEX_H */
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -8,6 +8,7 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/guards.h>
 #include <linux/list.h>
 
 /*
@@ -463,4 +464,7 @@ static __always_inline void preempt_enab
 		preempt_enable();
 }
 
+DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
+DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
+
 #endif /* __LINUX_PREEMPT_H */
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -27,6 +27,7 @@
 #include <linux/preempt.h>
 #include <linux/bottom_half.h>
 #include <linux/lockdep.h>
+#include <linux/guards.h>
 #include <asm/processor.h>
 #include <linux/cpumask.h>
 #include <linux/context_tracking_irq.h>
@@ -1095,4 +1096,6 @@ rcu_head_after_call_rcu(struct rcu_head
 extern int rcu_expedited;
 extern int rcu_normal;
 
+DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
+
 #endif /* __LINUX_RCUPDATE_H */
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -126,6 +126,8 @@ static inline void put_task_struct(struc
 		__put_task_struct(t);
 }
 
+DEFINE_PTR_GUARD(put_task, struct task_struct, put_task_struct)
+
 static inline void put_task_struct_many(struct task_struct *t, int nr)
 {
 	if (refcount_sub_and_test(nr, &t->usage))
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -61,6 +61,7 @@
 #include <linux/stringify.h>
 #include <linux/bottom_half.h>
 #include <linux/lockdep.h>
+#include <linux/guards.h>
 #include <asm/barrier.h>
 #include <asm/mmiowb.h>
 
@@ -502,5 +503,31 @@ int __alloc_bucket_spinlocks(spinlock_t
 
 void free_bucket_spinlocks(spinlock_t *locks);
 
+DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
+		    raw_spin_lock(_G->lock),
+		    raw_spin_unlock(_G->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
+		    raw_spin_lock_irq(_G->lock),
+		    raw_spin_unlock_irq(_G->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
+		    raw_spin_lock_irqsave(_G->lock, _G->flags),
+		    raw_spin_unlock_irqrestore(_G->lock, _G->flags),
+		    unsigned long flags)
+
+DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
+		    spin_lock(_G->lock),
+		    spin_unlock(_G->lock))
+
+DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
+		    spin_lock_irq(_G->lock),
+		    spin_unlock_irq(_G->lock))
+
+DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
+		    spin_lock_irqsave(_G->lock, _G->flags),
+		    spin_unlock_irqrestore(_G->lock, _G->flags),
+		    unsigned long flags)
+
 #undef __LINUX_INSIDE_SPINLOCK_H
 #endif /* __LINUX_SPINLOCK_H */
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -5046,7 +5046,7 @@ sub process {
 				if|for|while|switch|return|case|
 				volatile|__volatile__|
 				__attribute__|format|__extension__|
-				asm|__asm__)$/x)
+				asm|__asm__|scoped)$/x)
 			{
 			# cpp #define statements have non-optional spaces, ie
 			# if there is a space between the name and the open
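
For completeness, scoped() is a one-pass for-loop; with the definitions
in guards.h above, scoped (mutex, &my_mutex) { ... } expands to roughly
('my_mutex' again made up for the example):

	for (lock_guard_mutex_t *_done = NULL,
	     _scope __cleanup(lock_guard_mutex_cleanup) =
			lock_guard_mutex_init(&my_mutex);
	     !_done; _done = (void *)8) {
		/* my_mutex is held here; the body runs exactly once */
	}

with lock_guard_mutex_cleanup(&_scope) run by the compiler when the loop
scope is left.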

