Message-Id: <1409848265-17150-5-git-send-email-klamm@yandex-team.ru>
Date: Thu, 4 Sep 2014 20:30:51 +0400
From: klamm@...dex-team.ru
To: peterz@...radead.org, mingo@...hat.com,
linux-kernel@...r.kernel.org
Cc: stfomichev@...dex-team.ru, Roman Gushchin <klamm@...dex-team.ru>
Subject: [PATCH 05/19] smart: CPU selection logic
From: Roman Gushchin <klamm@...dex-team.ru>
This commit contains the most important code: the CPU selection logic.
It is implemented by the smart_find_lowest_rq() function and a few
helper functions.
The logic is relatively simple:
1) try to find a free core on the local node (starting with the previous task's CPU)
2) try to find a free core on the next remote node
3) try to find a free SMT thread on the local node
4) try to find a free SMT thread on a remote node
5) try to find the SMT thread running the fewest rt tasks
If we find a free CPU at any step, we try to acquire it; if that succeeds,
we stop further processing and quit. Otherwise, if the CPU turns out to be
locked, we restart the search.
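
For illustration only, the fallback order can be sketched in plain C roughly
as follows. This is a simplified model, not the patch code: find_free_core(),
find_free_smt_thread(), least_loaded_smt_thread() and try_acquire_cpu() are
hypothetical stand-ins for the topology helpers introduced by this series,
and RCU/affinity handling is omitted.

	/* Hypothetical, simplified sketch of the selection order above;
	 * not the actual kernel code. */
	#include <stdbool.h>

	/* Stand-ins for the real topology helpers; each may return -1. */
	extern int find_free_core(int node, int prev_cpu);
	extern int find_free_smt_thread(int node, int prev_cpu);
	extern int least_loaded_smt_thread(int node);
	extern bool try_acquire_cpu(int cpu);

	static int pick_cpu(int local_node, int remote_node, int prev_cpu)
	{
		int attempts, cpu;

		/* Retry a few times: a CPU may look free but get locked
		 * by a concurrent wakeup before we manage to acquire it. */
		for (attempts = 3; attempts; attempts--) {
			cpu = find_free_core(local_node, prev_cpu);           /* step 1 */
			if (cpu < 0)
				cpu = find_free_core(remote_node, prev_cpu);  /* step 2 */
			if (cpu < 0)
				cpu = find_free_smt_thread(local_node, prev_cpu);  /* step 3 */
			if (cpu < 0)
				cpu = find_free_smt_thread(remote_node, prev_cpu); /* step 4 */
			if (cpu < 0)
				return least_loaded_smt_thread(local_node);   /* step 5 */

			if (try_acquire_cpu(cpu))
				return cpu;
			/* Lost the race for this CPU: restart the whole search. */
		}
		return -1;
	}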
Signed-off-by: Roman Gushchin <klamm@...dex-team.ru>
---
kernel/sched/rt.c | 32 ++++++++++++++++++++++++++++++++
kernel/sched/sched.h | 43 +++++++++++++++++++++++++++++++++++++++++++
2 files changed, 75 insertions(+)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c71b9a3..805951b 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2286,4 +2286,36 @@ void build_smart_topology(void)
mutex_unlock(&smart_mutex);
}
+static int smart_find_lowest_rq(struct task_struct *task, bool wakeup)
+{
+ int prev_cpu = task_cpu(task);
+ int best_cpu;
+ int attempts;
+
+ if (task->nr_cpus_allowed == 1)
+ return -1; /* No other targets possible */
+
+ rcu_read_lock();
+
+
+ for (attempts = 3; attempts; attempts--) {
+ best_cpu = find_rt_free_core(prev_cpu, task);
+ if (best_cpu == -1) {
+ best_cpu = find_rt_best_thread(prev_cpu, task);
+
+ break;
+ }
+
+ if (!acquire_core(best_cpu))
+ continue;
+
+ if (likely(core_is_rt_free(best_cpu)))
+ break;
+
+ release_core(best_cpu);
+ }
+
+ rcu_read_unlock();
+ return best_cpu;
+}
#endif /* CONFIG_SMART */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4603096..b662a89 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1501,6 +1501,49 @@ static inline int find_rt_free_core(int start_cpu, struct task_struct *task)
return -1;
}
+static inline int find_rt_best_thread(int start_cpu, struct task_struct *task)
+{
+ int core;
+ int cpu;
+ struct rq *rq;
+ unsigned int min_running = 2;
+ int best_cpu = -1;
+ int nr_running;
+
+ /* Local cores */
+ core = cpu_core_id(start_cpu);
+ do {
+ cpu = core_rt_free_thread(core);
+ if (cpu != -1 && cpumask_test_cpu(cpu, tsk_cpus_allowed(task)))
+ return cpu;
+ } while (core = next_core(core), core != cpu_core_id(start_cpu));
+
+ /* Remote cores */
+ core = core_node_sibling(start_cpu);
+ do {
+ cpu = core_rt_free_thread(core);
+ if (cpu != -1 && cpumask_test_cpu(cpu, tsk_cpus_allowed(task)))
+ return cpu;
+ } while (core = next_core(core), core != core_node_sibling(start_cpu));
+
+ /* Find local thread with min. number of tasks */
+ for_each_cpu(cpu, topology_core_cpumask(start_cpu)) {
+ rq = cpu_rq(cpu);
+ nr_running = rq->rt.rt_nr_running;
+ if (nr_running < min_running &&
+ cpumask_test_cpu(cpu, tsk_cpus_allowed(task))) {
+ min_running = nr_running;
+ best_cpu = cpu;
+ }
+ }
+
+ if (best_cpu != -1 &&
+ min_running == cpu_rq(start_cpu)->rt.rt_nr_running)
+ best_cpu = -1;
+
+ return best_cpu;
+}
+
void build_smart_topology(void);
#else /* CONFIG_SMART */
--
1.9.3