Message-id: <1446811367-23783-10-git-send-email-a.krasnov@samsung.com>
Date:	Fri, 06 Nov 2015 15:02:43 +0300
From:	Arseniy Krasnov <a.krasnov@...sung.com>
To:	linux@....linux.org.uk, mingo@...hat.com, peterz@...radead.org
Cc:	a.krasnov@...sung.com, v.tyrtov@...sung.com,
	s.rogachev@...sung.com, linux-kernel@...r.kernel.org,
	Tarek Dakhran <t.dakhran@...sung.com>,
	Sergey Dyasly <s.dyasly@...sung.com>,
	Dmitriy Safonov <d.safonov@...tner.samsung.com>,
	Ilya Maximets <i.maximets@...sung.com>
Subject: [PATCH 09/13] hperf_hmp: one-way balancing functions.

	Add two almost identical functions that push/pull a task from/to the
current CPU to/from the opposite cluster. They are called when the balance
between the clusters is broken and needs to be fixed.
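
For reference, the destination-selection step shared by both movers (see
get_idlest_cpu() below) can be sketched as a standalone C program. The
cluster layout, per-CPU loads and online mask here are made-up example
values, not kernel state:

	#include <limits.h>
	#include <stdio.h>

	#define NR_CPUS 8

	int main(void)
	{
		/* Hypothetical big.LITTLE layout: CPUs 0-3 are A7 (slow),
		 * CPUs 4-7 are A15 (fast). */
		unsigned long load[NR_CPUS] = { 300, 120, 0, 450,
						900, 600, 750, 80 };
		int online[NR_CPUS] = { 1, 1, 1, 1, 1, 1, 0, 1 };
		int this_cpu = 5;		/* an A15 pushing to the A7s */
		int first = 0, last = 3;	/* the opposite (A7) cluster */
		unsigned long best = ULONG_MAX;
		int idlest_cpu = -1;
		int cpu;

		/* Same scan as get_idlest_cpu(): pick the lowest-loaded
		 * online CPU of the opposite cluster. */
		for (cpu = first; cpu <= last; cpu++) {
			if (online[cpu] && load[cpu] < best) {
				best = load[cpu];
				idlest_cpu = cpu;
			}
		}

		printf("CPU %d pushes to CPU %d (load %lu)\n",
		       this_cpu, idlest_cpu, best);
		return 0;
	}

Once a destination is known, move_a15_to_a7()/move_a7_to_a15() take both
runqueue locks and, if the task cannot be detached directly, defer the
migration to the CPU stopper via stop_one_cpu_nowait().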

Signed-off-by: Tarek Dakhran <t.dakhran@...sung.com>
Signed-off-by: Sergey Dyasly <s.dyasly@...sung.com>
Signed-off-by: Dmitriy Safonov <d.safonov@...tner.samsung.com>
Signed-off-by: Arseniy Krasnov <a.krasnov@...sung.com>
Signed-off-by: Ilya Maximets <i.maximets@...sung.com>
---
 kernel/sched/fair.c | 254 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 254 insertions(+)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 028d329..4fda1ec 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7519,6 +7519,260 @@ unlock:
 
 	return ld_moved;
 }
+
+/* Get the idlest CPU from the cluster opposite to this_cpu's. */
+static int get_idlest_cpu(struct sched_domain *sd, int this_cpu)
+{
+	struct sched_group *opposite_sg;
+	struct cpumask *opposite_mask;
+	unsigned long load = ULONG_MAX;
+	int idlest_cpu = -1;
+	int cpu;
+
+	opposite_sg = get_opposite_group(sd, cpu_is_fastest(this_cpu));
+	opposite_mask = sched_group_cpus(opposite_sg);
+
+	for_each_cpu_and(cpu, opposite_mask, cpu_online_mask) {
+		if (cpu_rq(cpu)->load.weight < load) {
+			load = cpu_rq(cpu)->load.weight;
+			idlest_cpu = cpu;
+		}
+	}
+	return idlest_cpu;
+}
+
+/**
+ * move_a15_to_a7(): Moves one task from A15 to A7.
+ * @sd: Current sched domain.
+ * @this_cpu: this CPU; without NO_HZ, same as smp_processor_id().
+ *
+ * Returns moved weight.
+ *
+ * Chooses the task to migrate by druntime.
+ */
+static unsigned int move_a15_to_a7(struct sched_domain *sd, int this_cpu)
+{
+	struct task_struct *task_to_move;
+	struct rq *local_rq = NULL;
+	struct rq *foreign_rq = NULL;
+	int local_stopper_flag = 0;
+	int foreign_stopper_flag = 0;
+	unsigned long local_flags;
+	unsigned int ld_moved = 0;
+
+	local_rq = cpu_rq(this_cpu);
+	local_irq_save(local_flags);
+
+	if (!cpu_is_fastest(this_cpu)) {
+		/* this A7 pulls a task from an A15 */
+		foreign_rq = get_unfair_rq(sd, this_cpu);
+
+		if (!foreign_rq) {
+			local_irq_restore(local_flags);
+			return 0;
+		}
+
+		double_lock_balance(foreign_rq, local_rq);
+
+		if (foreign_rq->active_balance)
+			goto unlock;
+
+		if (local_rq->active_balance)
+			goto unlock;
+
+		if (foreign_rq->cfs.h_nr_running <= 1)
+			goto unlock;
+
+		task_to_move = get_migration_candidate(sd, foreign_rq, 0,
+						       this_cpu);
+
+		if (!task_to_move)
+			goto unlock;
+
+		ld_moved = try_to_move_task(task_to_move, this_cpu,
+						&foreign_stopper_flag);
+
+		if (!ld_moved) {
+			task_to_move->se.migrate_candidate = 0;
+			goto unlock;
+		}
+
+		if (foreign_stopper_flag) {
+			foreign_rq->active_balance = 1;
+			foreign_rq->push_cpu = this_cpu;
+			foreign_rq->migrate_task = task_to_move;
+		}
+	} else {
+		/* this A15 pushes a task to an A7 */
+		int dst_cpu = get_idlest_cpu(sd, this_cpu);
+
+		if (dst_cpu == -1) {
+			local_irq_restore(local_flags);
+			return 0;
+		}
+
+		foreign_rq = cpu_rq(dst_cpu);
+		raw_spin_lock(&foreign_rq->lock);
+		double_lock_balance(foreign_rq, local_rq);
+
+		if (local_rq->cfs.h_nr_running <= 1)
+			goto unlock;
+
+		if (foreign_rq->active_balance)
+			goto unlock;
+
+		if (local_rq->active_balance)
+			goto unlock;
+
+		task_to_move = get_migration_candidate(sd, local_rq, 0,
+						       foreign_rq->cpu);
+
+		if (!task_to_move)
+			goto unlock;
+
+		ld_moved = try_to_move_task(task_to_move, dst_cpu,
+						&local_stopper_flag);
+
+		if (!ld_moved) {
+			task_to_move->se.migrate_candidate = 0;
+			goto unlock;
+		}
+
+		if (local_stopper_flag) {
+			local_rq->active_balance = 1;
+			local_rq->push_cpu = dst_cpu;
+			local_rq->migrate_task = task_to_move;
+		}
+	}
+unlock:
+	double_rq_unlock(local_rq, foreign_rq);
+	local_irq_restore(local_flags);
+
+	if (foreign_stopper_flag)
+		stop_one_cpu_nowait(foreign_rq->cpu,
+				    active_load_balance_cpu_stop, foreign_rq,
+				    &foreign_rq->active_balance_work);
+
+	if (local_stopper_flag)
+		stop_one_cpu_nowait(local_rq->cpu,
+				    active_load_balance_cpu_stop, local_rq,
+				    &local_rq->active_balance_work);
+
+	return ld_moved;
+}
+
+/**
+ * move_a7_to_a15(): Moves one task from A7 to A15.
+ * @sd: Current sched domain.
+ * @this_cpu: this CPU; without NO_HZ, same as smp_processor_id().
+ *
+ * Returns moved weight.
+ *
+ * Chooses the task to migrate by druntime.
+ */
+static unsigned int move_a7_to_a15(struct sched_domain *sd, int this_cpu)
+{
+	struct task_struct *task_to_move;
+	struct rq *local_rq = NULL;
+	struct rq *foreign_rq = NULL;
+	int local_stopper_flag = 0;
+	int foreign_stopper_flag = 0;
+	unsigned long local_flags;
+	unsigned int ld_moved = 0;
+
+	local_rq = cpu_rq(this_cpu);
+	local_irq_save(local_flags);
+
+	if (cpu_is_fastest(this_cpu)) {
+		/* this A15 pulls a task from an A7 */
+		foreign_rq = get_unfair_rq(sd, this_cpu);
+
+		if (!foreign_rq) {
+			local_irq_restore(local_flags);
+			return 0;
+		}
+		double_lock_balance(foreign_rq, local_rq);
+
+		if (local_rq->active_balance)
+			goto unlock;
+
+		if (foreign_rq->active_balance)
+			goto unlock;
+
+		task_to_move = get_migration_candidate(sd, foreign_rq, 0,
+						       this_cpu);
+
+		if (!task_to_move)
+			goto unlock;
+
+		ld_moved = try_to_move_task(task_to_move, this_cpu,
+						&foreign_stopper_flag);
+
+		if (!ld_moved) {
+			task_to_move->se.migrate_candidate = 0;
+			goto unlock;
+		}
+
+		if (foreign_stopper_flag) {
+			foreign_rq->active_balance = 1;
+			foreign_rq->push_cpu = this_cpu;
+			foreign_rq->migrate_task = task_to_move;
+		}
+	} else {
+		/* this A7 pushes a task to an A15 */
+		int dst_cpu = get_idlest_cpu(sd, this_cpu);
+
+		if (dst_cpu == -1) {
+			local_irq_restore(local_flags);
+			return 0;
+		}
+
+		foreign_rq = cpu_rq(dst_cpu);
+		raw_spin_lock(&foreign_rq->lock);
+		double_lock_balance(foreign_rq, local_rq);
+
+		if (foreign_rq->active_balance)
+			goto unlock;
+
+		if (local_rq->active_balance)
+			goto unlock;
+
+		task_to_move = get_migration_candidate(sd, local_rq, 0,
+						       foreign_rq->cpu);
+
+		if (!task_to_move)
+			goto unlock;
+
+		ld_moved = try_to_move_task(task_to_move, dst_cpu,
+						&local_stopper_flag);
+
+		if (!ld_moved) {
+			task_to_move->se.migrate_candidate = 0;
+			goto unlock;
+		}
+
+		if (local_stopper_flag) {
+			local_rq->active_balance = 1;
+			local_rq->push_cpu = dst_cpu;
+			local_rq->migrate_task = task_to_move;
+		}
+	}
+unlock:
+	double_rq_unlock(local_rq, foreign_rq);
+	local_irq_restore(local_flags);
+
+	if (foreign_stopper_flag)
+		stop_one_cpu_nowait(foreign_rq->cpu,
+				    active_load_balance_cpu_stop, foreign_rq,
+				    &foreign_rq->active_balance_work);
+
+	if (local_stopper_flag)
+		stop_one_cpu_nowait(local_rq->cpu,
+				    active_load_balance_cpu_stop, local_rq,
+				    &local_rq->active_balance_work);
+
+	return ld_moved;
+}
 #endif /* CONFIG_HPERF_HMP */
 
 /*
-- 
1.9.1
