[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20200121063307.17221-2-parth@linux.ibm.com>
Date: Tue, 21 Jan 2020 12:03:03 +0530
From: Parth Shah <parth@...ux.ibm.com>
To: linux-kernel@...r.kernel.org, linux-pm@...r.kernel.org
Cc: peterz@...radead.org, mingo@...hat.com, vincent.guittot@...aro.org,
dietmar.eggemann@....com, patrick.bellasi@...bug.net,
valentin.schneider@....com, pavel@....cz, dsmythies@...us.net,
qperret@...gle.com, tim.c.chen@...ux.intel.com
Subject: [RFC v6 1/5] sched: Introduce switch to enable TurboSched for task packing
Create a static key which allows the TurboSched feature to be enabled or
disabled at runtime.
This key is added in order to enable the TurboSched feature only when
required. This helps in optimizing the scheduler fast-path when the
TurboSched feature is disabled.
Also provide get/put methods to keep track of the tasks using the
TurboSched feature by refcounting the tasks classified as background
noise. This allows the feature to be enabled when the first such task is
classified, and disabled again when the last such task is unclassified.
Signed-off-by: Parth Shah <parth@...ux.ibm.com>
---
kernel/sched/core.c | 25 +++++++++++++++++++++++++
kernel/sched/sched.h | 12 ++++++++++++
2 files changed, 37 insertions(+)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a9e5d157b1a5..dfbb52d66b29 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -73,6 +73,39 @@ __read_mostly int scheduler_running;
*/
int sysctl_sched_rt_runtime = 950000;
+#ifdef CONFIG_SCHED_SMT
+DEFINE_STATIC_KEY_FALSE(__turbo_sched_enabled);
+static DEFINE_MUTEX(turbo_sched_lock);
+static int turbo_sched_count;
+
+/*
+ * Take a reference on the TurboSched feature. The static key is enabled
+ * when the first reference is taken; turbo_sched_lock serializes the
+ * refcount against concurrent get/put.
+ */
+void turbo_sched_get(void)
+{
+ mutex_lock(&turbo_sched_lock);
+ if (!turbo_sched_count++)
+ static_branch_enable(&__turbo_sched_enabled);
+ mutex_unlock(&turbo_sched_lock);
+}
+
+/*
+ * Drop a reference on the TurboSched feature. The static key is disabled
+ * again when the last reference goes away, keeping the scheduler
+ * fast-path cost at zero while the feature is unused.
+ */
+void turbo_sched_put(void)
+{
+ mutex_lock(&turbo_sched_lock);
+ if (!--turbo_sched_count)
+ static_branch_disable(&__turbo_sched_enabled);
+ mutex_unlock(&turbo_sched_lock);
+}
+#else
+void turbo_sched_get(void) { }
+void turbo_sched_put(void) { }
+#endif
+
/*
* __task_rq_lock - lock the rq @p resides on.
*/
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index edae9277e48d..f841297b7d56 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2497,3 +2497,21 @@ static inline void membarrier_switch_mm(struct rq *rq,
{
}
#endif
+
+void turbo_sched_get(void);
+void turbo_sched_put(void);
+
+#ifdef CONFIG_SCHED_SMT
+DECLARE_STATIC_KEY_FALSE(__turbo_sched_enabled);
+
+/* Fast-path check: true only while at least one task holds a reference. */
+static inline bool is_turbosched_enabled(void)
+{
+ return static_branch_unlikely(&__turbo_sched_enabled);
+}
+#else
+static inline bool is_turbosched_enabled(void)
+{
+ return false;
+}
+#endif
--
2.17.2
Powered by blists - more mailing lists