Message-ID: <1490265163-29981-8-git-send-email-byungchul.park@lge.com>
Date:   Thu, 23 Mar 2017 19:32:42 +0900
From:   Byungchul Park <byungchul.park@....com>
To:     <peterz@...radead.org>, <mingo@...nel.org>
CC:     <linux-kernel@...r.kernel.org>, <juri.lelli@...il.com>,
        <rostedt@...dmis.org>, <kernel-team@....com>
Subject: [PATCH 7/8] sched/deadline: Factor out the manipulation of cpudl's heap tree

Currently, cpudl_{set,clear} are responsible for manipulating cpudl's
heap tree and the free_cpus mask under lock protection. However, the
operation that manipulates the heap tree is reusable on its own.

In particular, the heap operation is useful when picking the second
maximum node from the tree, which needs to touch only the tree and not
the free_cpus mask. Factor it out into __cpudl_{set,clear}() so it can
be reused.
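
Purely as an illustrative, hypothetical sketch (not part of this patch,
and the function name is invented): a caller that already holds
cp->lock could use the factored-out helpers to peek at the second
maximum CPU without touching free_cpus, roughly like this:

/*
 * Illustration only: inspect the second-maximum CPU in the heap.
 * Assumes cp->lock is held, as __cpudl_{set,clear}() require.
 */
static int cpudl_second_maximum(struct cpudl *cp)
{
	int max_cpu, second_cpu;
	u64 max_dl;

	if (cp->size < 2)
		return -1;

	max_cpu = cp->elements[0].cpu;
	max_dl = cp->elements[0].dl;

	/* Pop the maximum from the heap only; free_cpus stays as-is. */
	__cpudl_clear(cp, max_cpu);
	second_cpu = cp->elements[0].cpu;

	/* Push the maximum back with its original deadline. */
	__cpudl_set(cp, max_cpu, max_dl);

	return second_cpu;
}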

Signed-off-by: Byungchul Park <byungchul.park@....com>
---
 kernel/sched/cpudeadline.c | 95 ++++++++++++++++++++++++++++++----------------
 1 file changed, 62 insertions(+), 33 deletions(-)

diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 21404b8..453159a 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -116,6 +116,64 @@ static inline u64 cpudl_maximum_dl(struct cpudl *cp)
 	return cp->elements[0].dl;
 }
 
+/*
+ * __cpudl_clear - remove a cpu from the cpudl max-heap
+ * @cp: the cpudl max-heap context
+ * @cpu: the target cpu
+ *
+ * Notes: assumes cpu_rq(cpu)->lock and cpudl->lock are locked
+ *
+ * Returns: (void)
+ */
+static void __cpudl_clear(struct cpudl *cp, int cpu)
+{
+	int old_idx, new_cpu;
+
+	old_idx = cp->elements[cpu].idx;
+	if (old_idx == IDX_INVALID) {
+		/*
+		 * Nothing to remove if old_idx was invalid.
+		 * This could happen if rq_offline_dl() is
+		 * called for a CPU without -dl tasks running.
+		 */
+	} else {
+		new_cpu = cp->elements[cp->size - 1].cpu;
+		cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
+		cp->elements[old_idx].cpu = new_cpu;
+		cp->size--;
+		cp->elements[new_cpu].idx = old_idx;
+		cp->elements[cpu].idx = IDX_INVALID;
+		cpudl_heapify(cp->elements, cp->size, old_idx);
+	}
+}
+
+/*
+ * __cpudl_set - update the cpudl max-heap
+ * @cp: the cpudl max-heap context
+ * @cpu: the target cpu
+ * @dl: the new earliest deadline for this cpu
+ *
+ * Notes: assumes cpu_rq(cpu)->lock and cpudl->lock are locked
+ *
+ * Returns: (void)
+ */
+static void __cpudl_set(struct cpudl *cp, int cpu, u64 dl)
+{
+	int old_idx;
+
+	old_idx = cp->elements[cpu].idx;
+	if (old_idx == IDX_INVALID) {
+		int new_idx = cp->size++;
+		cp->elements[new_idx].dl = dl;
+		cp->elements[new_idx].cpu = cpu;
+		cp->elements[cpu].idx = new_idx;
+		cpudl_heapify_up(cp->elements, cp->size, new_idx);
+	} else {
+		cp->elements[old_idx].dl = dl;
+		cpudl_heapify(cp->elements, cp->size, old_idx);
+	}
+}
+
 static int cpudl_fast_find(struct cpudl *cp, struct task_struct *p)
 {
 	const struct sched_dl_entity *dl_se = &p->dl;
@@ -176,31 +234,14 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
  */
 void cpudl_clear(struct cpudl *cp, int cpu)
 {
-	int old_idx, new_cpu;
 	unsigned long flags;
 
 	WARN_ON(!cpu_present(cpu));
 
 	raw_spin_lock_irqsave(&cp->lock, flags);
-
-	old_idx = cp->elements[cpu].idx;
-	if (old_idx == IDX_INVALID) {
-		/*
-		 * Nothing to remove if old_idx was invalid.
-		 * This could happen if a rq_offline_dl is
-		 * called for a CPU without -dl tasks running.
-		 */
-	} else {
-		new_cpu = cp->elements[cp->size - 1].cpu;
-		cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
-		cp->elements[old_idx].cpu = new_cpu;
-		cp->size--;
-		cp->elements[new_cpu].idx = old_idx;
-		cp->elements[cpu].idx = IDX_INVALID;
-		cpudl_heapify(cp->elements, cp->size, old_idx);
-
+	if (cp->elements[cpu].idx != IDX_INVALID)
 		cpumask_set_cpu(cpu, cp->free_cpus);
+	__cpudl_clear(cp, cpu);
-	}
 	raw_spin_unlock_irqrestore(&cp->lock, flags);
 }
 
@@ -216,26 +257,14 @@ void cpudl_clear(struct cpudl *cp, int cpu)
  */
 void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
 {
-	int old_idx;
 	unsigned long flags;
 
 	WARN_ON(!cpu_present(cpu));
 
 	raw_spin_lock_irqsave(&cp->lock, flags);
-
-	old_idx = cp->elements[cpu].idx;
-	if (old_idx == IDX_INVALID) {
-		int new_idx = cp->size++;
-		cp->elements[new_idx].dl = dl;
-		cp->elements[new_idx].cpu = cpu;
-		cp->elements[cpu].idx = new_idx;
-		cpudl_heapify_up(cp->elements, cp->size, new_idx);
+	if (cp->elements[cpu].idx == IDX_INVALID)
 		cpumask_clear_cpu(cpu, cp->free_cpus);
+	__cpudl_set(cp, cpu, dl);
-	} else {
-		cp->elements[old_idx].dl = dl;
-		cpudl_heapify(cp->elements, cp->size, old_idx);
-	}
-
 	raw_spin_unlock_irqrestore(&cp->lock, flags);
 }
 
-- 
1.9.1
