Message-ID: <20240711130004.2157737-4-vschneid@redhat.com>
Date: Thu, 11 Jul 2024 14:59:57 +0200
From: Valentin Schneider <vschneid@...hat.com>
To: linux-kernel@...r.kernel.org,
rcu@...r.kernel.org
Cc: Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Phil Auld <pauld@...hat.com>,
Clark Williams <williams@...hat.com>,
Tomas Glozar <tglozar@...hat.com>,
"Paul E. McKenney" <paulmck@...nel.org>,
Frederic Weisbecker <frederic@...nel.org>,
Neeraj Upadhyay <neeraj.upadhyay@...nel.org>,
Joel Fernandes <joel@...lfernandes.org>,
Josh Triplett <josh@...htriplett.org>,
Boqun Feng <boqun.feng@...il.com>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Lai Jiangshan <jiangshanlai@...il.com>,
Zqiang <qiang.zhang1211@...il.com>,
Alexander Gordeev <agordeev@...ux.ibm.com>,
Catalin Marinas <catalin.marinas@....com>,
Arnd Bergmann <arnd@...db.de>,
Guo Ren <guoren@...nel.org>,
Palmer Dabbelt <palmer@...osinc.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Oleg Nesterov <oleg@...hat.com>,
Jens Axboe <axboe@...nel.dk>
Subject: [RFC PATCH v3 03/10] task_work, sched: Add a _locked variant to task_work_cancel()

Later commits will need to issue a task_work_cancel() from within the
scheduler with the task's ->pi_lock held.

Add a _locked variant that expects p->pi_lock to be held. Expose it in a
separate scheduler header file, as this really is a scheduler-only
interface.
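
For illustration, a later scheduler-side caller is expected to look
roughly like the sketch below. Only task_work_cancel_locked() and
->pi_lock come from this series; the surrounding function, its name and
its callers are made up for the example:

/* Hypothetical scheduler-side user, for illustration only. */
#include "task_work_sched.h"

/*
 * Cancel a pending task_work for @p while the caller already holds
 * p->pi_lock. The regular task_work_cancel() cannot be used here
 * because it takes ->pi_lock itself.
 * Returns true if a pending work with ->func == @func was removed.
 */
static bool sched_cancel_pending_work(struct task_struct *p,
				      task_work_func_t func)
{
	lockdep_assert_held(&p->pi_lock);

	return task_work_cancel_locked(p, func) != NULL;
}
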
Signed-off-by: Valentin Schneider <vschneid@...hat.com>
---
kernel/sched/task_work_sched.h | 14 +++++++
kernel/task_work.c | 67 ++++++++++++++++++++++++++--------
2 files changed, 66 insertions(+), 15 deletions(-)
create mode 100644 kernel/sched/task_work_sched.h
diff --git a/kernel/sched/task_work_sched.h b/kernel/sched/task_work_sched.h
new file mode 100644
index 0000000000000..e235da456427f
--- /dev/null
+++ b/kernel/sched/task_work_sched.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Scheduler internal task_work methods
+ */
+#ifndef _KERNEL_TASK_WORK_SCHED_H
+#define _KERNEL_TASK_WORK_SCHED_H
+
+#include <linux/task_work.h>
+#include <linux/sched.h>
+
+struct callback_head *
+task_work_cancel_locked(struct task_struct *task, task_work_func_t func);
+
+#endif
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 95a7e1b7f1dab..81092bc2e7371 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -3,6 +3,8 @@
#include <linux/task_work.h>
#include <linux/resume_user_mode.h>
+#include "sched/task_work_sched.h"
+
static struct callback_head work_exited; /* all we need is ->next == NULL */
/**
@@ -74,33 +76,20 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
return 0;
}
-/**
- * task_work_cancel_match - cancel a pending work added by task_work_add()
- * @task: the task which should execute the work
- * @match: match function to call
- * @data: data to be passed in to match function
- *
- * RETURNS:
- * The found work or NULL if not found.
- */
-struct callback_head *
-task_work_cancel_match(struct task_struct *task,
+static struct callback_head *
+task_work_cancel_match_locked(struct task_struct *task,
bool (*match)(struct callback_head *, void *data),
void *data)
{
struct callback_head **pprev = &task->task_works;
struct callback_head *work;
- unsigned long flags;
- if (likely(!task_work_pending(task)))
- return NULL;
/*
* If cmpxchg() fails we continue without updating pprev.
* Either we raced with task_work_add() which added the
* new entry before this work, we will find it again. Or
* we raced with task_work_run(), *pprev == NULL/exited.
*/
- raw_spin_lock_irqsave(&task->pi_lock, flags);
work = READ_ONCE(*pprev);
while (work) {
if (!match(work, data)) {
@@ -109,6 +98,32 @@ task_work_cancel_match(struct task_struct *task,
} else if (try_cmpxchg(pprev, &work, work->next))
break;
}
+
+ return work;
+}
+
+/**
+ * task_work_cancel_match - cancel a pending work added by task_work_add()
+ * @task: the task which should execute the work
+ * @match: match function to call
+ * @data: data to be passed in to match function
+ *
+ * RETURNS:
+ * The found work or NULL if not found.
+ */
+struct callback_head *
+task_work_cancel_match(struct task_struct *task,
+ bool (*match)(struct callback_head *, void *data),
+ void *data)
+{
+ unsigned long flags;
+ struct callback_head *work;
+
+ if (likely(!task_work_pending(task)))
+ return NULL;
+
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
+ work = task_work_cancel_match_locked(task, match, data);
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return work;
@@ -136,6 +151,28 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
return task_work_cancel_match(task, task_work_func_match, func);
}
+/**
+ * task_work_cancel_locked - cancel a pending work added by task_work_add()
+ * @task: the task which should execute the work
+ * @func: identifies the work to remove
+ *
+ * Find the last queued pending work with ->func == @func and remove
+ * it from the queue. The caller must hold @task->pi_lock.
+ *
+ * RETURNS:
+ * The found work or NULL if not found.
+ */
+struct callback_head *
+task_work_cancel_locked(struct task_struct *task, task_work_func_t func)
+{
+ lockdep_assert_held(&task->pi_lock);
+
+ if (likely(!task_work_pending(task)))
+ return NULL;
+
+ return task_work_cancel_match_locked(task, task_work_func_match, func);
+}
+
/**
* task_work_run - execute the works added by task_work_add()
*
--
2.43.0