Message-ID: <tip-d1b42b800e5d09dcee52812b4396aca3a3696ba9@git.kernel.org>
Date:   Sat, 14 Jan 2017 04:55:03 -0800
From:   tip-bot for Chris Wilson <tipbot@...or.com>
To:     linux-tip-commits@...r.kernel.org
Cc:     dev@...ankhorst.nl, chris@...is-wilson.co.uk,
        linux-kernel@...r.kernel.org, hpa@...or.com, mingo@...nel.org,
        nhaehnle@...il.com, tglx@...utronix.de, paulmck@...ux.vnet.ibm.com,
        torvalds@...ux-foundation.org, peterz@...radead.org,
        akpm@...ux-foundation.org
Subject: [tip:locking/core] locking/ww_mutex: Add kselftests for resolving
 ww_mutex cyclic deadlocks

Commit-ID:  d1b42b800e5d09dcee52812b4396aca3a3696ba9
Gitweb:     http://git.kernel.org/tip/d1b42b800e5d09dcee52812b4396aca3a3696ba9
Author:     Chris Wilson <chris@...is-wilson.co.uk>
AuthorDate: Thu, 1 Dec 2016 11:47:09 +0000
Committer:  Ingo Molnar <mingo@...nel.org>
CommitDate: Sat, 14 Jan 2017 11:37:16 +0100

locking/ww_mutex: Add kselftests for resolving ww_mutex cyclic deadlocks

Check that ww_mutexes can detect cyclic deadlocks (generalised ABBA
cycles) and resolve them by lock reordering.
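
For reference, the resolution being exercised is the standard ww_mutex
backoff sequence used by test_cycle_work() below: when ww_mutex_lock()
returns -EDEADLK, the task releases the locks it already holds, reacquires
the contended lock with ww_mutex_lock_slow(), and then retries the rest of
the acquisition. A minimal sketch of that pattern, assuming two illustrative
objects a and b that each embed a ww_mutex named "lock" (only the ww_* calls
are real kernel API; the object names are not part of this patch):

	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &ww_class);

	err = ww_mutex_lock(&a->lock, &ctx);
	if (!err) {
		err = ww_mutex_lock(&b->lock, &ctx);
		if (err == -EDEADLK) {
			/*
			 * This context must back off: drop what we hold,
			 * take the contended lock (sleeping until it is
			 * free), then retry the remaining lock.
			 */
			ww_mutex_unlock(&a->lock);
			ww_mutex_lock_slow(&b->lock, &ctx);
			err = ww_mutex_lock(&a->lock, &ctx);
		}
	}

	if (!err) {
		/* ... operate on the protected objects ... */
		ww_mutex_unlock(&b->lock);
		ww_mutex_unlock(&a->lock);
	}
	ww_acquire_fini(&ctx);

The test below builds a ring of such acquisitions across a workqueue so
that every worker contends cyclically and must resolve via this backoff.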

Signed-off-by: Chris Wilson <chris@...is-wilson.co.uk>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Maarten Lankhorst <dev@...ankhorst.nl>
Cc: Nicolai Hähnle <nhaehnle@...il.com>
Cc: Paul E. McKenney <paulmck@...ux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Link: http://lkml.kernel.org/r/20161201114711.28697-7-chris@chris-wilson.co.uk
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 kernel/locking/test-ww_mutex.c | 115 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 115 insertions(+)

diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 5a643ba..84da738 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -21,9 +21,11 @@
 #include <linux/completion.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/ww_mutex.h>
 
 static DEFINE_WW_CLASS(ww_class);
+struct workqueue_struct *wq;
 
 struct test_mutex {
 	struct work_struct work;
@@ -243,10 +245,118 @@ static int test_abba(bool resolve)
 	return ret;
 }
 
+struct test_cycle {
+	struct work_struct work;
+	struct ww_mutex a_mutex;
+	struct ww_mutex *b_mutex;
+	struct completion *a_signal;
+	struct completion b_signal;
+	int result;
+};
+
+static void test_cycle_work(struct work_struct *work)
+{
+	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
+	struct ww_acquire_ctx ctx;
+	int err;
+
+	ww_acquire_init(&ctx, &ww_class);
+	ww_mutex_lock(&cycle->a_mutex, &ctx);
+
+	complete(cycle->a_signal);
+	wait_for_completion(&cycle->b_signal);
+
+	err = ww_mutex_lock(cycle->b_mutex, &ctx);
+	if (err == -EDEADLK) {
+		ww_mutex_unlock(&cycle->a_mutex);
+		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
+		err = ww_mutex_lock(&cycle->a_mutex, &ctx);
+	}
+
+	if (!err)
+		ww_mutex_unlock(cycle->b_mutex);
+	ww_mutex_unlock(&cycle->a_mutex);
+	ww_acquire_fini(&ctx);
+
+	cycle->result = err;
+}
+
+static int __test_cycle(unsigned int nthreads)
+{
+	struct test_cycle *cycles;
+	unsigned int n, last = nthreads - 1;
+	int ret;
+
+	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
+	if (!cycles)
+		return -ENOMEM;
+
+	for (n = 0; n < nthreads; n++) {
+		struct test_cycle *cycle = &cycles[n];
+
+		ww_mutex_init(&cycle->a_mutex, &ww_class);
+		if (n == last)
+			cycle->b_mutex = &cycles[0].a_mutex;
+		else
+			cycle->b_mutex = &cycles[n + 1].a_mutex;
+
+		if (n == 0)
+			cycle->a_signal = &cycles[last].b_signal;
+		else
+			cycle->a_signal = &cycles[n - 1].b_signal;
+		init_completion(&cycle->b_signal);
+
+		INIT_WORK(&cycle->work, test_cycle_work);
+		cycle->result = 0;
+	}
+
+	for (n = 0; n < nthreads; n++)
+		queue_work(wq, &cycles[n].work);
+
+	flush_workqueue(wq);
+
+	ret = 0;
+	for (n = 0; n < nthreads; n++) {
+		struct test_cycle *cycle = &cycles[n];
+
+		if (!cycle->result)
+			continue;
+
+		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
+		       n, nthreads, cycle->result);
+		ret = -EINVAL;
+		break;
+	}
+
+	for (n = 0; n < nthreads; n++)
+		ww_mutex_destroy(&cycles[n].a_mutex);
+	kfree(cycles);
+	return ret;
+}
+
+static int test_cycle(unsigned int ncpus)
+{
+	unsigned int n;
+	int ret;
+
+	for (n = 2; n <= ncpus + 1; n++) {
+		ret = __test_cycle(n);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int __init test_ww_mutex_init(void)
 {
+	int ncpus = num_online_cpus();
 	int ret;
 
+	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
+	if (!wq)
+		return -ENOMEM;
+
 	ret = test_mutex();
 	if (ret)
 		return ret;
@@ -263,11 +373,16 @@ static int __init test_ww_mutex_init(void)
 	if (ret)
 		return ret;
 
+	ret = test_cycle(ncpus);
+	if (ret)
+		return ret;
+
 	return 0;
 }
 
 static void __exit test_ww_mutex_exit(void)
 {
+	destroy_workqueue(wq);
 }
 
 module_init(test_ww_mutex_init);
