Message-ID: <20210401133917.111845286@infradead.org>
Date: Thu, 01 Apr 2021 15:10:13 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: joel@...lfernandes.org, chris.hyser@...cle.com, joshdon@...gle.com,
mingo@...nel.org, vincent.guittot@...aro.org,
valentin.schneider@....com, mgorman@...e.de
Cc: linux-kernel@...r.kernel.org, peterz@...radead.org, tj@...nel.org,
tglx@...utronix.de
Subject: [PATCH 1/9] sched: Allow sched_core_put() from atomic context

Stuff the meat of sched_core_put() into a work item such that we can
use sched_core_put() from atomic context.

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
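For illustration, a minimal sketch of the same scheme with hypothetical
example_* names (not from this patch): gets take a lock-free fast path
while the count is non-zero, the 0 -> 1 transition serializes on the
mutex, and the final decrement, which ends in a sleeping teardown, is
punted to a work item so the put side never blocks:

	#include <linux/atomic.h>
	#include <linux/mutex.h>
	#include <linux/workqueue.h>

	static DEFINE_MUTEX(example_mutex);
	static atomic_t example_count;

	/* Heavyweight enable/disable; both may sleep. */
	static void example_enable(void) { }
	static void example_disable(void) { }

	void example_get(void)
	{
		/* Fast path: bump an already-non-zero count, no mutex. */
		if (atomic_inc_not_zero(&example_count))
			return;

		/* Slow path: serialize the 0 -> 1 transition. */
		mutex_lock(&example_mutex);
		if (!atomic_read(&example_count))
			example_enable();

		/* Order the enable before the count becomes visible. */
		smp_mb__before_atomic();
		atomic_inc(&example_count);
		mutex_unlock(&example_mutex);
	}

	static void example_put_work(struct work_struct *work)
	{
		/* Take the mutex only if the count actually hits zero. */
		if (atomic_dec_and_mutex_lock(&example_count, &example_mutex)) {
			example_disable();
			mutex_unlock(&example_mutex);
		}
	}

	void example_put(void)
	{
		static DECLARE_WORK(_work, example_put_work);

		/*
		 * Drop the reference unless it is the last one; the
		 * final put runs from process context via the work.
		 */
		if (!atomic_add_unless(&example_count, -1, 1))
			schedule_work(&_work);
	}
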
kernel/sched/core.c | 33 +++++++++++++++++++++++++++------
 1 file changed, 27 insertions(+), 6 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -223,7 +223,7 @@ static struct task_struct *sched_core_ne
*/
static DEFINE_MUTEX(sched_core_mutex);
-static int sched_core_count;
+static atomic_t sched_core_count;
 static struct cpumask sched_core_mask;

static void __sched_core_flip(bool enabled)
@@ -286,18 +286,39 @@ static void __sched_core_disable(void)

void sched_core_get(void)
{
+ if (atomic_inc_not_zero(&sched_core_count))
+ return;
+
mutex_lock(&sched_core_mutex);
- if (!sched_core_count++)
+ if (!atomic_read(&sched_core_count))
__sched_core_enable();
+
+ smp_mb__before_atomic();
+ atomic_inc(&sched_core_count);
mutex_unlock(&sched_core_mutex);
 }

-void sched_core_put(void)
+static void __sched_core_put(struct work_struct *work)
{
- mutex_lock(&sched_core_mutex);
- if (!--sched_core_count)
+ if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
__sched_core_disable();
- mutex_unlock(&sched_core_mutex);
+ mutex_unlock(&sched_core_mutex);
+ }
+}
+
+void sched_core_put(void)
+{
+ static DECLARE_WORK(_work, __sched_core_put);
+
+ /*
+ * "There can only be one"
+ *
+ * Either this is the last one, or we don't actually need to do any
+ * 'work'. If it is the last *again*, we rely on
+ * WORK_STRUCT_PENDING_BIT.
+ */
+ if (!atomic_add_unless(&sched_core_count, -1, 1))
+ schedule_work(&_work);
 }

#else /* !CONFIG_SCHED_CORE */
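
For illustration, a hypothetical caller (example_lock and
example_atomic_release are made-up names, not from this series): with
the final decrement deferred to the work, the last sched_core_put() may
now run with a raw spinlock held and IRQs off; concurrent last-puts
collapse onto the single static work item via WORK_STRUCT_PENDING_BIT,
as the comment in sched_core_put() notes:

	static DEFINE_RAW_SPINLOCK(example_lock);

	void example_atomic_release(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&example_lock, flags);
		/* ... updates that must not sleep ... */
		sched_core_put();	/* queues work; never sleeps */
		raw_spin_unlock_irqrestore(&example_lock, flags);
	}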