Message-Id: <1426685070-4419-2-git-send-email-xlpang@126.com>
Date:	Wed, 18 Mar 2015 21:24:30 +0800
From:	Xunlei Pang <xlpang@....com>
To:	linux-kernel@...r.kernel.org
Cc:	Peter Zijlstra <peterz@...radead.org>,
	Steven Rostedt <rostedt@...dmis.org>,
	Juri Lelli <juri.lelli@...il.com>,
	Xunlei Pang <pang.xunlei@...aro.org>
Subject: [PATCH RESEND 2/2] sched/rt: Consider deadline tasks in cpupri_find()

From: Xunlei Pang <pang.xunlei@...aro.org>

Currently, RT global scheduling does not take deadline
tasks into account, which can cause problems.

Consider the following case:
On a 3-CPU system, CPU0 is running a deadline task, CPU1
is running a low-priority RT task (or is idle), and CPU2
is running a high-priority RT task. When a mid-priority
RT task is woken on CPU2, it gets pushed to CPU0, even
though CPU1 would be the reasonable target. Worse still,
this disturbs the higher-class deadline task on CPU0
(causing a sched IPI, rq locking, etc).

This patch eliminates the issue by filtering out CPUs
that have runnable deadline tasks: if any deadline task
is present, cpupri_find() restricts its result using
cpudl->free_cpus.

NOTE: We want to make the most of the percpu
local_cpu_mask to save an extra mask allocation, so we
now always pass a non-NULL lowest_mask to cpupri_find().
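
For reference, the core of the new per-priority check in
cpupri_find() is sketched below. This is a simplified
excerpt of the cpupri.c hunk in this patch, with the
earlier empty/overflow checks of the loop elided:

	for (idx = 0; idx < task_pri; idx++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
		struct cpudl *dl = cp->cpudl;

		/* ... vec->count and overflow checks elided ... */

		/* CPUs at this priority that p may run on */
		cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);

		/*
		 * New: if any deadline task is runnable, keep only
		 * CPUs that carry no deadline task.
		 */
		if (dl->size)
			cpumask_and(lowest_mask, lowest_mask,
				    dl->free_cpus);

		/* Nothing left at this level, try the next one */
		if (cpumask_any(lowest_mask) >= nr_cpu_ids)
			continue;

		return 1;
	}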

Signed-off-by: Xunlei Pang <pang.xunlei@...aro.org>
---
 kernel/sched/core.c   |  3 ++-
 kernel/sched/cpupri.c | 29 +++++++++++------------------
 kernel/sched/cpupri.h |  3 ++-
 kernel/sched/rt.c     | 14 ++++++++++----
 4 files changed, 25 insertions(+), 24 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 17141da..86450df 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5644,8 +5644,9 @@ static int init_rootdomain(struct root_domain *rd)
 	if (cpudl_init(&rd->cpudl) != 0)
 		goto free_dlo_mask;
 
-	if (cpupri_init(&rd->cpupri) != 0)
+	if (cpupri_init(&rd->cpupri, &rd->cpudl) != 0)
 		goto free_rto_mask;
+
 	return 0;
 
 free_rto_mask:
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 981fcd7..1a1e38d 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -31,6 +31,7 @@
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
 #include <linux/slab.h>
+#include "cpudeadline.h"
 #include "cpupri.h"
 
 /* Convert between a 140 based task->prio, and our 102 based cpupri */
@@ -54,7 +55,7 @@ static int convert_prio(int prio)
  * cpupri_find - find the best (lowest-pri) CPU in the system
  * @cp: The cpupri context
  * @p: The task
- * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
+ * @lowest_mask: A mask to fill in with selected CPUs (not NULL)
  *
  * Note: This function returns the recommended CPUs as calculated during the
  * current invocation.  By the time the call returns, the CPUs may have in
@@ -75,6 +76,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 
 	for (idx = 0; idx < task_pri; idx++) {
 		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
+		struct cpudl *dl = cp->cpudl;
 		int skip = 0;
 
 		if (!atomic_read(&(vec)->count))
@@ -103,24 +105,12 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		if (skip)
 			continue;
 
-		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+		cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+		if (dl->size)
+			cpumask_and(lowest_mask, lowest_mask, dl->free_cpus);
+		if (cpumask_any(lowest_mask) >= nr_cpu_ids)
 			continue;
 
-		if (lowest_mask) {
-			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
-
-			/*
-			 * We have to ensure that we have at least one bit
-			 * still set in the array, since the map could have
-			 * been concurrently emptied between the first and
-			 * second reads of vec->mask.  If we hit this
-			 * condition, simply act as though we never hit this
-			 * priority level and continue on.
-			 */
-			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
-				continue;
-		}
-
 		return 1;
 	}
 
@@ -202,10 +192,11 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 /**
  * cpupri_init - initialize the cpupri structure
  * @cp: The cpupri context
+ * @cpudl: The cpudl context of the same root domain
  *
  * Return: -ENOMEM on memory allocation failure.
  */
-int cpupri_init(struct cpupri *cp)
+int cpupri_init(struct cpupri *cp, struct cpudl *cpudl)
 {
 	int i;
 
@@ -226,6 +217,8 @@ int cpupri_init(struct cpupri *cp)
 	for_each_possible_cpu(i)
 		cp->cpu_to_pri[i] = CPUPRI_INVALID;
 
+	cp->cpudl = cpudl;
+
 	return 0;
 
 cleanup:
diff --git a/kernel/sched/cpupri.h b/kernel/sched/cpupri.h
index 63cbb9c..6fee80b 100644
--- a/kernel/sched/cpupri.h
+++ b/kernel/sched/cpupri.h
@@ -18,13 +18,14 @@ struct cpupri_vec {
 struct cpupri {
 	struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
 	int *cpu_to_pri;
+	struct cpudl *cpudl;
 };
 
 #ifdef CONFIG_SMP
 int  cpupri_find(struct cpupri *cp,
 		 struct task_struct *p, struct cpumask *lowest_mask);
 void cpupri_set(struct cpupri *cp, int cpu, int pri);
-int cpupri_init(struct cpupri *cp);
+int cpupri_init(struct cpupri *cp, struct cpudl *cpudl);
 void cpupri_cleanup(struct cpupri *cp);
 #endif
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f4d4b07..564b19a 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1354,14 +1354,22 @@ out:
 	return cpu;
 }
 
+static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
+
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 {
+	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
+
+	/* Make sure the mask is initialized first */
+	if (unlikely(!lowest_mask))
+		return;
+
 	/*
 	 * Current can't be migrated, useless to reschedule,
 	 * let's hope p can move out.
 	 */
 	if (rq->curr->nr_cpus_allowed == 1 ||
-	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
+	    !cpupri_find(&rq->rd->cpupri, rq->curr, lowest_mask))
 		return;
 
 	/*
@@ -1369,7 +1377,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 	 * see if it is pushed or pulled somewhere else.
 	 */
 	if (p->nr_cpus_allowed != 1
-	    && cpupri_find(&rq->rd->cpupri, p, NULL))
+	    && cpupri_find(&rq->rd->cpupri, p, lowest_mask))
 		return;
 
 	/*
@@ -1531,8 +1539,6 @@ static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
 	return NULL;
 }
 
-static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
-
 static int find_lowest_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
-- 
1.9.1


