Message-ID: <1413591782-23453-7-git-send-email-paul.gortmaker@windriver.com>
Date:	Fri, 17 Oct 2014 20:23:01 -0400
From:	Paul Gortmaker <paul.gortmaker@...driver.com>
To:	<linux-rt-users@...r.kernel.org>
CC:	<linux-kernel@...r.kernel.org>,
	Thomas Gleixner <tglx@...utronix.de>,
	Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
	"Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
	Steven Rostedt <rostedt@...dmis.org>,
	Peter Zijlstra <peterz@...radead.org>,
	Paul Gortmaker <paul.gortmaker@...driver.com>
Subject: [PATCH 6/7] simplewait: don't run a possibly unbounded number of wakeups under a raw lock

The simple wait queues use a raw lock in order to remain functional
on the preempt-rt kernels.  That means a single wakeup call can end
up processing an arbitrarily long list of waiters with the raw lock
held and interrupts disabled.  PeterZ suggested[1] the following
change so that we come up for air now and again, keeping the
worst-case lock hold time bounded and hence deterministic.

I'm not really in love with the solution of passing the flags around,
but couldn't think of anything cleaner to achieve the same thing.

[1] http://marc.info/?l=linux-kernel&m=138089860308430
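
For anyone who wants to see the pattern in isolation, here is a
standalone userspace sketch of the same "lock break" idea.  It is
not part of the patch, all names in it are invented, and a pthread
mutex stands in for the raw spinlock:

#include <pthread.h>
#include <stddef.h>

struct waiter {
	struct waiter *next;
	pthread_cond_t cond;	/* init with PTHREAD_COND_INITIALIZER */
	int woken;
};

struct wait_head {
	pthread_mutex_t lock;	/* stand-in for the raw spinlock */
	struct waiter *list;
};

/* Enqueue ourselves and block until a waker marks us woken. */
static void wait_on(struct wait_head *q, struct waiter *w)
{
	pthread_mutex_lock(&q->lock);
	w->woken = 0;
	w->next = q->list;
	q->list = w;
	while (!w->woken)
		pthread_cond_wait(&w->cond, &q->lock);
	pthread_mutex_unlock(&q->lock);
}

/* Wake everyone, but never hold the lock across the whole drain. */
static void wake_all_lock_break(struct wait_head *q)
{
	pthread_mutex_lock(&q->lock);
	while (q->list != NULL) {
		struct waiter *w = q->list;

		q->list = w->next;
		w->woken = 1;
		pthread_cond_signal(&w->cond);

		/*
		 * The "come up for air" step: briefly drop the lock
		 * between wakeups so other contenders are not stuck
		 * behind an arbitrarily long drain of the list.
		 */
		pthread_mutex_unlock(&q->lock);
		pthread_mutex_lock(&q->lock);
	}
	pthread_mutex_unlock(&q->lock);
}

The kernel version below only breaks the lock when need_resched()
says somebody is waiting for the CPU, and it does the break with
raw_spin_unlock_irqrestore()/raw_spin_lock_irqsave(), so the
caller's pre-lock interrupt state is briefly restored and the
scheduler gets a chance to run.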

Cc: Peter Zijlstra <peterz@...radead.org>
Signed-off-by: Paul Gortmaker <paul.gortmaker@...driver.com>

diff --git a/include/linux/wait.h b/include/linux/wait.h
index 2a57e00250f9..46e2591c22b6 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -262,7 +262,7 @@ void __swake_up(struct swait_head *q, unsigned int mode, int nr);
 void __cwake_up_locked_key(struct cwait_head *q, unsigned int mode, void *key);
 void __cwake_up_sync_key(struct cwait_head *q, unsigned int mode, int nr, void *key);
 void __cwake_up_locked(struct cwait_head *q, unsigned int mode, int nr);
-void __swake_up_locked(struct swait_head *q, unsigned int mode, int nr);
+void __swake_up_locked(struct swait_head *q, unsigned int mode, int nr, unsigned long *flags);
 void __cwake_up_sync(struct cwait_head *q, unsigned int mode, int nr);
 void __cwake_up_bit(struct cwait_head *, void *, int);
 int __cwait_on_bit(struct cwait_head *, struct cwait_bit *, cwait_bit_action_f *, unsigned);
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index 7a165c697956..87ef42158fdf 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -32,7 +32,7 @@ void complete(struct completion *x)
 
 	raw_spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__swake_up_locked(&x->wait, TASK_NORMAL, 1);
+	__swake_up_locked(&x->wait, TASK_NORMAL, 1, &flags);
 	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -52,7 +52,7 @@ void complete_all(struct completion *x)
 
 	raw_spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__swake_up_locked(&x->wait, TASK_NORMAL, 0);
+	__swake_up_locked(&x->wait, TASK_NORMAL, 0, &flags);
 	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 634427c25945..25e5886ed8d9 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -123,8 +123,12 @@ static void __cwake_up_common(struct cwait_head *q, unsigned int mode,
 	}
 }
 
+/*
+ * The swait version takes an extra flags argument so that we can bound
+ * the time spent on a possibly large number of wakeups done under a raw lock.
+ */
 static void __swake_up_common(struct swait_head *q, unsigned int mode,
-			      int nr_exclusive)
+			      int nr_exclusive, unsigned long *flags)
 {
 	struct swait *curr, *next;
 	int woken = 0;
@@ -146,6 +150,10 @@ static void __swake_up_common(struct swait_head *q, unsigned int mode,
 				break;
 		}
 
+		if (need_resched()) {
+			raw_spin_unlock_irqrestore(&q->lock, *flags);
+			raw_spin_lock_irqsave(&q->lock, *flags);
+		}
 	}
 }
 
@@ -178,7 +186,7 @@ void __swake_up(struct swait_head *q, unsigned int mode, int nr_exclusive)
 		return;
 
 	raw_spin_lock_irqsave(&q->lock, flags);
-	__swake_up_common(q, mode, nr_exclusive);
+	__swake_up_common(q, mode, nr_exclusive, &flags);
 	raw_spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL(__swake_up);
@@ -192,12 +200,13 @@ void __cwake_up_locked(struct cwait_head *q, unsigned int mode, int nr)
 }
 EXPORT_SYMBOL_GPL(__cwake_up_locked);
 
-void __swake_up_locked(struct swait_head *q, unsigned int state, int nr)
+void __swake_up_locked(struct swait_head *q, unsigned int state, int nr,
+		       unsigned long *flags)
 {
 	if (!swait_active(q))
 		return;
 
-	__swake_up_common(q, state, nr);
+	__swake_up_common(q, state, nr, flags);
 }
 EXPORT_SYMBOL_GPL(__swake_up_locked);
 
-- 
1.9.2
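
For completeness, this is what a caller of the reworked helper is
expected to look like.  The sketch below is not from the patch;
my_complete() is a hypothetical name mirroring complete() above:

/* Hypothetical caller, mirroring complete() from the patch above. */
static void my_complete(struct completion *x)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	/* May briefly drop and re-take x->wait.lock if a resched is due. */
	__swake_up_locked(&x->wait, TASK_NORMAL, 1, &flags);
	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
}

Passing &flags rather than the value lets the inner unlock/relock
reuse and refresh the caller's saved interrupt state, so the closing
raw_spin_unlock_irqrestore() always restores what the most recent
irqsave recorded.  Note the contract change for callers: anything
done under the lock before the wakeup call must already be in a
consistent state, since the lock can now be dropped in the middle of
__swake_up_locked().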
