Message-Id: <1454517912-10457-5-git-send-email-boqun.feng@gmail.com>
Date:	Thu,  4 Feb 2016 00:45:10 +0800
From:	Boqun Feng <boqun.feng@...il.com>
To:	linux-kernel@...r.kernel.org
Cc:	Peter Zijlstra <peterz@...radead.org>,
	Ingo Molnar <mingo@...nel.org>,
	"Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
	Josh Triplett <josh@...htriplett.org>,
	Steven Rostedt <rostedt@...dmis.org>,
	Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
	Lai Jiangshan <jiangshanlai@...il.com>, sasha.levin@...cle.com,
	Boqun Feng <boqun.feng@...il.com>
Subject: [RFC 4/6] lockdep: LOCKED_ACCESS: Introduce locked_access_point()

locked_access_point() is the entry point of the whole LOCKED_ACCESS
framework: every time locked_access_point() is called, LOCKED_ACCESS
correlates the data access location with the current acqchain.

So putting locked_access_point() at the data accesses you care about is
step #2 of using LOCKED_ACCESS.
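
For illustration, here is a minimal sketch of what such an annotation
could look like at a call site.  The foo structure, its lock and the
foo_laclass locked_access_class are hypothetical and assumed to have
been set up in step #1:

	struct foo {
		spinlock_t lock;
		int counter;
	};

	/* Assumed to be defined and initialized in step #1. */
	extern struct locked_access_class foo_laclass;

	static int foo_read_counter(struct foo *f)
	{
		int val;

		spin_lock(&f->lock);
		/*
		 * Correlate this read with the acqchain currently
		 * held for foo_laclass.
		 */
		locked_access_point(&foo_laclass, LOCKED_ACCESS_TYPE_READ);
		val = f->counter;
		spin_unlock(&f->lock);

		return val;
	}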

Signed-off-by: Boqun Feng <boqun.feng@...il.com>
---
 include/linux/lockdep.h  |  29 ++++++++++++
 kernel/locking/lockdep.c | 121 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 150 insertions(+)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 2ffe6c3..cf7b6c8 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -570,4 +570,33 @@ lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 }
 #endif
 
+#ifdef CONFIG_LOCKED_ACCESS
+struct locked_access_location {
+	/* Filename of the access */
+	const char			*filename;
+	/* Line number of the access */
+	long				lineno;
+};
+
+#define LOCKED_ACCESS_TYPE_READ		1 /* read */
+
+extern void locked_access(struct locked_access_class *laclass,
+			  struct locked_access_location *access,
+			  int type);
+
+/*
+ * Entry point of LOCKED_ACCESS; should be called at every place where a
+ * data access belonging to the laclass happens.
+ *
+ * @_type must be one of the LOCKED_ACCESS_TYPE_*
+ */
+#define locked_access_point(_laclass, _type) \
+({ \
+	static struct locked_access_location a = { \
+				.filename = __FILE__, \
+				.lineno = __LINE__, \
+	}; \
+	locked_access(_laclass, &a, _type); \
+})
+#endif /* CONFIG_LOCKED_ACCESS */
 #endif /* __LINUX_LOCKDEP_H */
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 77732a9..996c2d5 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4503,4 +4503,125 @@ lookup_or_add_acqchain(struct locked_access_class *laclass,
 	return acqchain;
 }
 
+/*
+ * Lookup the data access at @loc in the ->accesses list of @acqchain.
+ *
+ * Must be called after @laclass is initialized.
+ */
+static int lookup_locked_access(struct acqchain *acqchain,
+				struct locked_access_location *loc)
+{
+	struct locked_access_struct *s;
+
+	list_for_each_entry_lockless(s, &acqchain->accesses, list) {
+		if (s->loc == loc)
+			return 1;
+	}
+	return 0;
+
+}
+
+/*
+ * Add the data access at @loc into the ->accesses list of @acqchain.
+ *
+ * Return 1 if the access is present or has been added, 0 if the table is full.
+ *
+ * Must be called after @laclass is initialized.
+ */
+static int add_locked_access(struct locked_access_class *laclass,
+			     struct acqchain *acqchain,
+			     struct locked_access_location *loc,
+			     int type)
+{
+	unsigned long flags;
+	struct locked_access_struct *s;
+
+	local_irq_save(flags);
+	arch_spin_lock(&laclass->lock);
+
+	/* Lookup again while holding the lock */
+	if (lookup_locked_access(acqchain, loc)) {
+		arch_spin_unlock(&laclass->lock);
+		local_irq_restore(flags);
+		return 1;
+	}
+
+	if (unlikely(laclass->nr_access_structs >= MAX_LOCKED_ACCESS_STRUCTS)) {
+		arch_spin_unlock(&laclass->lock);
+		local_irq_restore(flags);
+		return 0;
+	}
+
+	s = laclass->access_structs + laclass->nr_access_structs;
+	s->loc = loc;
+	s->type = type;
+	laclass->nr_access_structs++;
+
+	/*
+	 * Pairs with the list_for_each_entry_lockless() in
+	 * lookup_locked_access()
+	 */
+	list_add_tail_rcu(&s->list, &acqchain->accesses);
+
+	arch_spin_unlock(&laclass->lock);
+	local_irq_restore(flags);
+	return 1;
+}
+
+/*
+ * Correlate the data access at @loc with @acqchain for @laclass
+ *
+ * Must be called after @laclass is initialized.
+ */
+static int correlate_locked_access(struct locked_access_class *laclass,
+				   struct acqchain *acqchain,
+				   struct locked_access_location *loc,
+				   int type)
+{
+	if (lookup_locked_access(acqchain, loc))
+		return 1;
+
+	return add_locked_access(laclass, acqchain, loc, type);
+}
+
+/*
+ * The implementation of entry point locked_access_point(), called when a data
+ * access belonging to @laclass happens.
+ */
+void locked_access(struct locked_access_class *laclass,
+		   struct locked_access_location *loc,
+		   int type)
+{
+	u64 acqchain_key = current->curr_acqchain_key;
+	struct acqchain *acqchain;
+
+	if (unlikely(!laclass))
+		return;
+
+	/*
+	 * Don't track data accesses for lockdep itself, because we rely on
+	 * the ->held_locks list that lockdep maintains, and when
+	 * ->lockdep_recursion is nonzero, locks are not tracked in ->held_locks.
+	 */
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	/* Data access outside a critical section */
+	if (current->lockdep_depth <= 0)
+		return;
+
+	/*
+	 * We only check whether laclass is initialized in locked_access(),
+	 * because this is the entry point of LOCKED_ACCESS.
+	 */
+	if (unlikely(!smp_load_acquire(&laclass->initialized)))
+		locked_access_class_init(laclass);
+
+	acqchain = lookup_or_add_acqchain(laclass, current, acqchain_key);
+
+	if (acqchain)
+		correlate_locked_access(laclass, acqchain, loc, type);
+}
+EXPORT_SYMBOL(locked_access);
+
 #endif /* CONFIG_LOCKED_ACCESS */
-- 
2.7.0
