Message-ID: <20240216180559.208276-7-tj@kernel.org>
Date: Fri, 16 Feb 2024 08:04:47 -1000
From: Tejun Heo <tj@...nel.org>
To: jiangshanlai@...il.com
Cc: torvalds@...ux-foundation.org,
linux-kernel@...r.kernel.org,
allen.lkml@...il.com,
kernel-team@...a.com,
Tejun Heo <tj@...nel.org>
Subject: [PATCH 06/17] workqueue: Introduce work_cancel_flags
The cancel path used the bool @is_dwork to distinguish between canceling a
regular work item and a delayed one. The planned disable/enable support will
need to pass another flag through the same code path. As passing multiple
bools around would be confusing, let's introduce named flags to pass around
in the cancel path. WORK_CANCEL_DELAYED replaces @is_dwork. No functional
changes.
Signed-off-by: Tejun Heo <tj@...nel.org>
---
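Note for readers less familiar with the pattern: the sketch below is not from
this patch and uses made-up names (cancel_item, CANCEL_DELAYED,
CANCEL_DISABLE); it only illustrates why a named flags enum reads better at
the call site than a bare bool once a second option is about to appear, which
is the conversion applied to the cancel path here.

	/* Illustrative sketch only -- not kernel code. */
	#include <stdio.h>

	enum cancel_flags {
		CANCEL_DELAYED	= 1 << 0,	/* canceling a delayed item */
		CANCEL_DISABLE	= 1 << 1,	/* hypothetical second flag */
	};

	static void cancel_item(const char *name, unsigned int cflags)
	{
		printf("%s: delayed=%d disable=%d\n", name,
		       !!(cflags & CANCEL_DELAYED),
		       !!(cflags & CANCEL_DISABLE));
	}

	int main(void)
	{
		/* cancel_item("timer", true, false) would be opaque;
		 * named flags keep the call sites self-documenting. */
		cancel_item("timer", CANCEL_DELAYED);
		cancel_item("work", 0);
		return 0;
	}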
kernel/workqueue.c | 29 +++++++++++++++++------------
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b590d93d054b..317c85f051b0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -96,6 +96,10 @@ enum worker_flags {
WORKER_UNBOUND | WORKER_REBOUND,
};
+enum work_cancel_flags {
+ WORK_CANCEL_DELAYED = 1 << 0, /* canceling a delayed_work */
+};
+
enum wq_internal_consts {
NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */
@@ -2028,7 +2032,7 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_
/**
* try_to_grab_pending - steal work item from worklist and disable irq
* @work: work item to steal
- * @is_dwork: @work is a delayed_work
+ * @cflags: %WORK_CANCEL_ flags
* @irq_flags: place to store irq state
*
* Try to grab PENDING bit of @work. This function can handle @work in any
@@ -2055,7 +2059,7 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_
*
* This function is safe to call from any context including IRQ handler.
*/
-static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+static int try_to_grab_pending(struct work_struct *work, u32 cflags,
unsigned long *irq_flags)
{
struct worker_pool *pool;
@@ -2064,7 +2068,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
local_irq_save(*irq_flags);
/* try to steal the timer if it exists */
- if (is_dwork) {
+ if (cflags & WORK_CANCEL_DELAYED) {
struct delayed_work *dwork = to_delayed_work(work);
/*
@@ -2543,7 +2547,8 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
int ret;
do {
- ret = try_to_grab_pending(&dwork->work, true, &irq_flags);
+ ret = try_to_grab_pending(&dwork->work, WORK_CANCEL_DELAYED,
+ &irq_flags);
} while (unlikely(ret == -EAGAIN));
if (likely(ret >= 0)) {
@@ -4103,13 +4108,13 @@ bool flush_rcu_work(struct rcu_work *rwork)
}
EXPORT_SYMBOL(flush_rcu_work);
-static bool __cancel_work(struct work_struct *work, bool is_dwork)
+static bool __cancel_work(struct work_struct *work, u32 cflags)
{
unsigned long irq_flags;
int ret;
do {
- ret = try_to_grab_pending(work, is_dwork, &irq_flags);
+ ret = try_to_grab_pending(work, cflags, &irq_flags);
} while (unlikely(ret == -EAGAIN));
if (unlikely(ret < 0))
@@ -4134,14 +4139,14 @@ static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *k
return autoremove_wake_function(wait, mode, sync, key);
}
-static bool __cancel_work_sync(struct work_struct *work, bool is_dwork)
+static bool __cancel_work_sync(struct work_struct *work, u32 cflags)
{
static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
unsigned long irq_flags;
int ret;
do {
- ret = try_to_grab_pending(work, is_dwork, &irq_flags);
+ ret = try_to_grab_pending(work, cflags, &irq_flags);
/*
* If someone else is already canceling, wait for it to
* finish. flush_work() doesn't work for PREEMPT_NONE
@@ -4203,7 +4208,7 @@ static bool __cancel_work_sync(struct work_struct *work, bool is_dwork)
*/
bool cancel_work(struct work_struct *work)
{
- return __cancel_work(work, false);
+ return __cancel_work(work, 0);
}
EXPORT_SYMBOL(cancel_work);
@@ -4227,7 +4232,7 @@ EXPORT_SYMBOL(cancel_work);
*/
bool cancel_work_sync(struct work_struct *work)
{
- return __cancel_work_sync(work, false);
+ return __cancel_work_sync(work, 0);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
@@ -4249,7 +4254,7 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
*/
bool cancel_delayed_work(struct delayed_work *dwork)
{
- return __cancel_work(&dwork->work, true);
+ return __cancel_work(&dwork->work, WORK_CANCEL_DELAYED);
}
EXPORT_SYMBOL(cancel_delayed_work);
@@ -4264,7 +4269,7 @@ EXPORT_SYMBOL(cancel_delayed_work);
*/
bool cancel_delayed_work_sync(struct delayed_work *dwork)
{
- return __cancel_work_sync(&dwork->work, true);
+ return __cancel_work_sync(&dwork->work, WORK_CANCEL_DELAYED);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
--
2.43.2