Message-Id: <20180907214047.26914-36-jschoenh@amazon.de>
Date:   Fri,  7 Sep 2018 23:40:22 +0200
From:   Jan H. Schönherr <jschoenh@...zon.de>
To:     Ingo Molnar <mingo@...hat.com>,
        Peter Zijlstra <peterz@...radead.org>
Cc:     Jan H. Schönherr <jschoenh@...zon.de>,
        linux-kernel@...r.kernel.org
Subject: [RFC 35/60] cosched: Adjust rq_lock() functions to work with hierarchical runqueues

Locks within the runqueue hierarchy are always taken from bottom to top
to avoid deadlocks. Let the lock validator know about this by assigning
each runqueue level its own lockdep subclass.
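
For illustration only (not part of the patch), here is a minimal sketch of
how the per-level subclasses are meant to be used, assuming a hypothetical
child/parent pair of runqueues with the bottom level at 0:

/*
 * Illustrative sketch, not part of this patch. child_rq, parent_rq and
 * lock_child_and_parent() are hypothetical; RQ_LOCK_SUBCLASS() is the
 * macro introduced below.
 */
static void lock_child_and_parent(struct rq *child_rq, struct rq *parent_rq)
{
	/* bottom first: level 0 maps to subclass 0 */
	raw_spin_lock_nested(&child_rq->lock, RQ_LOCK_SUBCLASS(child_rq));
	/* then its parent: level 1 maps to subclass 2 */
	raw_spin_lock_nested(&parent_rq->lock, RQ_LOCK_SUBCLASS(parent_rq));

	/* ... work on both runqueues ... */

	raw_spin_unlock(&parent_rq->lock);
	raw_spin_unlock(&child_rq->lock);
}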

Signed-off-by: Jan H. Schönherr <jschoenh@...zon.de>
---
 kernel/sched/sched.h | 29 ++++++++++++++++++++++++++---
 1 file changed, 26 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 594eb9489f3d..bc3631b8b955 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2083,11 +2083,26 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
 }
 
+#ifdef CONFIG_COSCHEDULING
+/*
+ * The hierarchical runqueues have locks which are taken from bottom to
+ * top. For lock validation, we use the level to calculate the subclass.
+ * As it is sometimes necessary to take two locks on the same level, we
+ * leave some space in the subclass values for that purpose.
+ */
+#define RQ_LOCK_SUBCLASS(rq)		(2 * (rq)->sdrq_data.level)
+#define RQ_LOCK_SUBCLASS_NESTED(rq)	(2 * (rq)->sdrq_data.level + 1)
+#else
+#define RQ_LOCK_SUBCLASS(rq)		0
+#define RQ_LOCK_SUBCLASS_NESTED(rq)	SINGLE_DEPTH_NESTING
+#endif
+
 static inline void
 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
 	__acquires(rq->lock)
 {
-	raw_spin_lock_irqsave(&rq->lock, rf->flags);
+	raw_spin_lock_irqsave_nested(&rq->lock, rf->flags,
+				     RQ_LOCK_SUBCLASS(rq));
 	rq_pin_lock(rq, rf);
 }
 
@@ -2095,6 +2110,14 @@ static inline void
 rq_lock_irq(struct rq *rq, struct rq_flags *rf)
 	__acquires(rq->lock)
 {
+	/*
+	 * There's no raw_spin_lock_irq_nested(). This is probably fine, as at
+	 * most the first lock should be acquired this way. There might be some
+	 * false negatives, though, if we start with a non-bottom lock and
+	 * classify it incorrectly.
+	 */
+	SCHED_WARN_ON(RQ_LOCK_SUBCLASS(rq));
+
 	raw_spin_lock_irq(&rq->lock);
 	rq_pin_lock(rq, rf);
 }
@@ -2103,7 +2126,7 @@ static inline void
 rq_lock(struct rq *rq, struct rq_flags *rf)
 	__acquires(rq->lock)
 {
-	raw_spin_lock(&rq->lock);
+	raw_spin_lock_nested(&rq->lock, RQ_LOCK_SUBCLASS(rq));
 	rq_pin_lock(rq, rf);
 }
 
@@ -2111,7 +2134,7 @@ static inline void
 rq_relock(struct rq *rq, struct rq_flags *rf)
 	__acquires(rq->lock)
 {
-	raw_spin_lock(&rq->lock);
+	raw_spin_lock_nested(&rq->lock, RQ_LOCK_SUBCLASS(rq));
 	rq_repin_lock(rq, rf);
 }
 
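For illustration only (not part of the patch): since there is no
raw_spin_lock_irq_nested(), an irq-disabling lock function that still
passes a subclass could in principle be open-coded from primitives that
do exist, along these lines:

/*
 * Illustrative sketch, not part of this patch: an open-coded
 * equivalent of a hypothetical raw_spin_lock_irq_nested(), built
 * from local_irq_disable() and raw_spin_lock_nested().
 */
static inline void
rq_lock_irq_nested_sketch(struct rq *rq, struct rq_flags *rf)
	__acquires(rq->lock)
{
	local_irq_disable();
	raw_spin_lock_nested(&rq->lock, RQ_LOCK_SUBCLASS(rq));
	rq_pin_lock(rq, rf);
}
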
-- 
2.9.3.1.gcba166c.dirty
