Message-Id: <20161130003548.22266-4-chris@chris-wilson.co.uk>
Date: Wed, 30 Nov 2016 00:35:48 +0000
From: Chris Wilson <chris@...is-wilson.co.uk>
To: linux-kernel@...r.kernel.org
Cc: Chris Wilson <chris@...is-wilson.co.uk>,
Peter Zijlstra <peterz@...radead.org>,
Maarten Lankhorst <dev@...ankhorst.nl>,
Nicolai Hähnle <nhaehnle@...il.com>
Subject: [PATCH 4/4] locking: Add kselftests for ww_mutex stress
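Apply a load of 1024 work items across a shared set of ww_mutexes. Each
worker acquires the entire set in an independently randomised order,
backing off and retrying via ww_mutex_lock_slow() whenever it hits
-EDEADLK, which stresses the deadlock detection and recovery paths
under heavy contention. Two rounds are run on module load: one with 16
locks and one with 4096.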
Signed-off-by: Chris Wilson <chris@...is-wilson.co.uk>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Maarten Lankhorst <dev@...ankhorst.nl>
Cc: Nicolai Hähnle <nhaehnle@...il.com>
---
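For reviewers less familiar with the ww_mutex API, here is a condensed
sketch (not part of the patch; example_class and lock_all are
illustrative names) of the caller-side acquire/backoff cycle that
stress_work() below drives with a randomised lock order, following
Documentation/locking/ww-mutex-design.txt:

static DEFINE_WW_CLASS(example_class);

/*
 * Acquire every mutex in @locks. On success the caller runs its
 * critical section, unlocks them all and calls ww_acquire_fini(ctx);
 * @ctx must therefore outlive the locks being held.
 */
static int lock_all(struct ww_mutex **locks, int count,
		    struct ww_acquire_ctx *ctx)
{
	int contended = -1;
	int n, err;

	ww_acquire_init(ctx, &example_class);
retry:
	err = 0;
	for (n = 0; n < count; n++) {
		if (n == contended) /* already held via the slowpath */
			continue;

		err = ww_mutex_lock(locks[n], ctx);
		if (err < 0)
			break;
	}
	if (!err) {
		ww_acquire_done(ctx); /* no more locks in this context */
		return 0;
	}

	/* Back off: drop everything we hold. A lock taken by an earlier
	 * slowpath pass lies outside [0, n) only when contended > n.
	 */
	if (contended > n)
		ww_mutex_unlock(locks[contended]);
	contended = n;
	while (n--)
		ww_mutex_unlock(locks[n]);

	if (err == -EDEADLK) {
		/* An older context owns the contended lock: sleep until
		 * it is released, take it, then retry the sequence.
		 */
		ww_mutex_lock_slow(locks[contended], ctx);
		goto retry;
	}

	ww_acquire_fini(ctx);
	return err;
}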
kernel/locking/test-ww_mutex.c | 134 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 134 insertions(+)
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 63a5031de138..c367014f62dc 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -21,6 +21,9 @@
#include <linux/kthread.h>
#include <linux/ww_mutex.h>
#include <linux/completion.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
@@ -224,6 +227,129 @@ static int test_abba(void)
return ret;
}
+struct stress {
+ struct work_struct work;
+ struct ww_mutex *locks;
+ int nlocks;
+};
+
+static int *get_random_order(int count)
+{
+ int *order;
+ int n, r, tmp;
+
+ order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY);
+ if (!order)
+ return order;
+
+ for (n = 0; n < count; n++)
+ order[n] = n;
+
+ /* Fisher-Yates shuffle; run down to n == 1 so slots 0 and 1 also mix */
+ for (n = count - 1; n > 0; n--) {
+ r = get_random_int() % (n + 1);
+ if (r != n) {
+ tmp = order[n];
+ order[n] = order[r];
+ order[r] = tmp;
+ }
+ }
+
+ return order;
+}
+
+static void stress_work(struct work_struct *work)
+{
+ struct stress *stress = container_of(work, typeof(*stress), work);
+ const int nlocks = stress->nlocks;
+ struct ww_mutex *locks = stress->locks;
+ struct ww_acquire_ctx ctx;
+ int contended = -1;
+ int *order;
+ int n, ret;
+
+ order = get_random_order(nlocks);
+ if (!order) {
+ kfree(stress);
+ return;
+ }
+
+ ww_acquire_init(&ctx, &ww_class);
+
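+ /*
+ * Take the whole set in our randomised order; on a retry pass, skip
+ * the contended lock, which is already held via ww_mutex_lock_slow().
+ */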
+retry:
+ ret = 0;
+ for (n = 0; n < nlocks; n++) {
+ if (n == contended)
+ continue;
+
+ ret = ww_mutex_lock(&locks[order[n]], &ctx);
+ if (ret < 0)
+ break;
+ }
+ if (!ret)
+ usleep_range(1000, 2000); /* dummy load */
+
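+ /*
+ * Release every lock we hold. The lock taken by ww_mutex_lock_slow()
+ * on a previous pass lies outside the while loop's range only when
+ * contended > n, so it needs an explicit unlock here.
+ */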
+ if (contended > n)
+ ww_mutex_unlock(&locks[order[contended]]);
+ contended = n;
+ while (n--)
+ ww_mutex_unlock(&locks[order[n]]);
+
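+ /*
+ * -EDEADLK: an older context owns the lock we stumbled over. Sleep
+ * until it is released, acquire it, then retry the whole sequence.
+ */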
+ if (ret == -EDEADLK) {
+ ww_mutex_lock_slow(&locks[order[contended]], &ctx);
+ goto retry;
+ }
+
+ if (ret)
+ pr_err_once("ww_mutex stress test failed with %d\n", ret);
+
+ ww_acquire_fini(&ctx);
+
+ kfree(order);
+ kfree(stress);
+}
+
+static int stress(int nlocks, int count)
+{
+ struct ww_mutex *locks;
+ struct workqueue_struct *wq;
+ int ret = -ENOMEM;
+ int n;
+
+ wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
+ if (!wq)
+ return -ENOMEM;
+
+ locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
+ if (!locks)
+ goto err;
+
+ for (n = 0; n < nlocks; n++)
+ ww_mutex_init(&locks[n], &ww_class);
+
+ for (n = 0; n < count; n++) {
+ struct stress *stress;
+
+ stress = kmalloc(sizeof(*stress), GFP_KERNEL);
+ if (!stress)
+ break;
+
+ INIT_WORK(&stress->work, stress_work);
+ stress->locks = locks;
+ stress->nlocks = nlocks;
+
+ queue_work(wq, &stress->work);
+ }
+
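+ /* Wait for all workers to finish before tearing the locks down. */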
+ flush_workqueue(wq);
+
+ for (n = 0; n < nlocks; n++)
+ ww_mutex_destroy(&locks[n]);
+ kfree(locks);
+
+ ret = 0;
+err:
+ destroy_workqueue(wq);
+ return ret;
+}
+
static int __init test_ww_mutex_init(void)
{
int ret;
@@ -240,6 +366,14 @@ static int __init test_ww_mutex_init(void)
if (ret)
return ret;
+ ret = stress(16, 1024);
+ if (ret)
+ return ret;
+
+ ret = stress(4096, 1024);
+ if (ret)
+ return ret;
+
return 0;
}
--
2.10.2