Date:   Fri, 14 Aug 2020 23:19:06 -0400
From:   "Joel Fernandes (Google)" <joel@...lfernandes.org>
To:     linux-kernel@...r.kernel.org
Cc:     Vineeth Pillai <viremana@...ux.microsoft.com>,
        Joel Fernandes <joel@...lfernandes.org>,
        Aaron Lu <aaron.lwe@...il.com>,
        Aubrey Li <aubrey.li@...ux.intel.com>,
        Julien Desfossez <jdesfossez@...italocean.com>,
        Kees Cook <keescook@...omium.org>,
        "Paul E. McKenney" <paulmck@...nel.org>,
        Paul Turner <pjt@...gle.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Steven Rostedt <rostedt@...dmis.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Tim Chen <tim.c.chen@...el.com>,
        Tim Chen <tim.c.chen@...ux.intel.com>,
        Vincent Guittot <vincent.guittot@...aro.org>,
        x86@...nel.org (maintainer:X86 ARCHITECTURE (32-BIT AND 64-BIT)),
        fweisbec@...il.com, kerrnel@...gle.com,
        Phil Auld <pauld@...hat.com>,
        Valentin Schneider <valentin.schneider@....com>,
        Paolo Bonzini <pbonzini@...hat.com>,
        Chen Yu <yu.c.chen@...el.com>,
        Christian Brauner <christian.brauner@...ntu.com>
Subject: [PATCH RFC 10/12] sched/coresched: Make core_pick_seq per run-queue

From: Vineeth Pillai <viremana@...ux.microsoft.com>

core_pick_seq is a core-wide counter used to tell whether a task pick
has been made for a CPU by its sibling. During hotplug, however, no
pick can be made for a CPU while it is offline; when the CPU comes
back online, it is tricked into consuming a pick that was never made
for it, because the core-wide counter was incremented in the meantime.

So, make core_pick_seq per run-queue.

Signed-off-by: Vineeth Pillai <viremana@...ux.microsoft.com>
Signed-off-by: Joel Fernandes (Google) <joel@...lfernandes.org>
---
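[Not part of the patch: below is a minimal userspace sketch of the
failure mode and the fix, for reviewers who want to see the counter
interplay in isolation. All names here are made up for illustration;
nothing in it is kernel API.]

/*
 * Toy model: why a core-wide pick counter confuses a CPU returning
 * from hotplug, and how a per-runqueue pick_seq avoids it.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_rq {
	unsigned int pick_seq;  /* per-rq: bumped only when a pick is published for this CPU */
	unsigned int sched_seq; /* last pick this CPU actually scheduled */
	bool online;
};

static unsigned int core_task_seq; /* core-wide: bumped on enqueue/dequeue */

/* Sibling publishes a selection; only online CPUs get a pick. */
static void publish_pick(struct toy_rq *rqs, int nr)
{
	core_task_seq++;
	for (int i = 0; i < nr; i++)
		if (rqs[i].online)
			rqs[i].pick_seq = core_task_seq;
}

/* A CPU trusts a pre-selected pick only if one was made for *it*. */
static bool has_valid_pick(struct toy_rq *rq)
{
	return rq->pick_seq == core_task_seq &&
	       rq->pick_seq != rq->sched_seq;
}

int main(void)
{
	struct toy_rq rqs[2] = { { .online = true }, { .online = false } };

	publish_pick(rqs, 2); /* CPU1 is offline: no pick is made for it */
	rqs[1].online = true; /* CPU1 comes back via hotplug */

	/*
	 * With the per-rq counter, CPU1 correctly reports no valid pick.
	 * With a single core-wide pick_seq, it would match core_task_seq
	 * and wrongly consume a selection that was never made for it.
	 */
	printf("CPU1 valid pick? %s\n",
	       has_valid_pick(&rqs[1]) ? "yes" : "no");
	return 0;
}

The point of the per-rq counter is that pick_seq only advances when a
sibling actually publishes a selection for that CPU, so a freshly
onlined CPU can never match core_task_seq spuriously.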
 kernel/sched/core.c  | 19 ++++++++++---------
 kernel/sched/sched.h |  2 +-
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3e9df8221c62..48a49168e57f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4623,9 +4623,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	 * pointers are all still valid), and we haven't scheduled the last
 	 * pick yet, do so now.
 	 */
-	if (rq->core->core_pick_seq == rq->core->core_task_seq &&
-	    rq->core->core_pick_seq != rq->core_sched_seq) {
-		WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
+	if (rq->core_pick_seq == rq->core->core_task_seq &&
+	    rq->core_pick_seq != rq->core_sched_seq) {
+		WRITE_ONCE(rq->core_sched_seq, rq->core_pick_seq);
 
 		next = rq->core_pick;
 		if (next != prev) {
@@ -4635,7 +4635,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 		trace_printk("pick pre selected (%u %u %u): %s/%d %lx\n",
 			     rq->core->core_task_seq,
-			     rq->core->core_pick_seq,
+			     rq->core_pick_seq,
 			     rq->core_sched_seq,
 			     next->comm, next->pid,
 			     next->core_cookie);
@@ -4649,7 +4649,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	smt_mask = cpu_smt_mask(cpu);
 
 	/*
-	 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
+	 * core->core_task_seq, rq->core_pick_seq, rq->core_sched_seq
 	 *
 	 * @task_seq guards the task state ({en,de}queues)
 	 * @pick_seq is the @task_seq we did a selection on
@@ -4667,8 +4667,10 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		struct rq *rq_i = cpu_rq(i);
 
 		trace_printk("CPU %d is in smt_mask, resetting\n", i);
-
-		rq_i->core_pick = NULL;
+		if (rq_i->core_pick) {
+			WRITE_ONCE(rq_i->core_sched_seq, rq_i->core_pick_seq);
+			rq_i->core_pick = NULL;
+		}
 
 		if (rq_i->core_forceidle) {
 			need_sync = true;
@@ -4771,9 +4773,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 next_class:;
 	}
 
-	rq->core->core_pick_seq = rq->core->core_task_seq;
 	next = rq->core_pick;
-	rq->core_sched_seq = rq->core->core_pick_seq;
 
 	/* Something should have been selected for current CPU */
 	WARN_ON_ONCE(!next);
@@ -4801,6 +4801,7 @@ next_class:;
 			continue;
 
 		if (rq_i->curr != rq_i->core_pick) {
+			WRITE_ONCE(rq_i->core_pick_seq, rq->core->core_task_seq);
 			trace_printk("IPI(%d)\n", i);
 			resched_curr(rq_i);
 		}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 2922e171a1f0..c7caece2df6e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1036,6 +1036,7 @@ struct rq {
 	/* per rq */
 	struct rq		*core;
 	struct task_struct	*core_pick;
+	unsigned int		core_pick_seq;
 	unsigned int		core_enabled;
 	unsigned int		core_sched_seq;
 	struct rb_root		core_tree;
@@ -1045,7 +1046,6 @@ struct rq {
 
 	/* shared state */
 	unsigned int		core_task_seq;
-	unsigned int		core_pick_seq;
 	unsigned long		core_cookie;
 	unsigned int		core_unsafe_nest;
 #endif
-- 
2.28.0.220.ged08abb693-goog
