Message-Id: <20250113043540.2396426-1-imran.f.khan@oracle.com>
Date: Mon, 13 Jan 2025 15:35:40 +1100
From: Imran Khan <imran.f.khan@...cle.com>
To: tj@...nel.org, jiangshanlai@...il.com
Cc: linux-kernel@...r.kernel.org
Subject: [RFC PATCH] workqueue: introduce queue_delayed_work_on_offline_safe.
Currently, users of queue_delayed_work_on() need to ensure that the
specified CPU is and remains online. Failure to do so may result in
the delayed_work getting queued on an offlined CPU and hence never
getting executed.

The current users of queue_delayed_work_on() seem to meet this
requirement, but for those callers, existing or new, that cannot
guarantee it, another interface is needed.
So introduce queue_delayed_work_on_offline_safe(), which explicitly
checks that the specified CPU is and remains online. If the specified
CPU is already offline, or if that cannot be confirmed (because the
hotplug lock could not be acquired), delayed_work.timer is not queued
on the specified CPU and is armed globally instead.
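For illustration, here is a minimal usage sketch of the proposed
interface. The poller and all of its names (poll_dwork, poll_fn,
poll_start, target_cpu) are hypothetical and are not part of this
patch:

#include <linux/workqueue.h>

static int target_cpu;
static struct delayed_work poll_dwork;

/* Re-arm on the preferred CPU; tolerate that CPU going offline. */
static void poll_fn(struct work_struct *work)
{
	/* ... inspect per-CPU state of target_cpu here ... */

	queue_delayed_work_on_offline_safe(target_cpu, system_wq,
					   &poll_dwork, HZ);
}

static void poll_start(int cpu)
{
	target_cpu = cpu;
	INIT_DELAYED_WORK(&poll_dwork, poll_fn);
	queue_delayed_work_on_offline_safe(cpu, system_wq,
					   &poll_dwork, HZ);
}

If target_cpu goes offline (or the hotplug lock cannot be taken), the
timer is armed globally instead of being parked on a dead CPU, so the
work is never lost.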
Signed-off-by: Imran Khan <imran.f.khan@...cle.com>
---
I have kept the patch as RFC because, from the mailing list, I could
not find any user of queue_delayed_work_on() that ends up queuing a
dwork on an offlined CPU. We have some in-house code that runs into
this problem, and currently we are fixing it on the caller side of
queue_delayed_work_on(). Other users who run into this issue can also
take the caller-side approach (sketched below), or they can use the
interface introduced here for such use cases.
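For reference, here is a hedged sketch of the caller-side fix
mentioned above. my_queue_dwork_on() is a hypothetical helper, not
part of this patch; it pins CPU hotplug state itself and falls back
to an unbound queue when the CPU is gone:

#include <linux/cpu.h>
#include <linux/workqueue.h>

static bool my_queue_dwork_on(int cpu, struct workqueue_struct *wq,
			      struct delayed_work *dwork,
			      unsigned long delay)
{
	bool ret;

	/*
	 * Hold the hotplug read lock so @cpu cannot go away between
	 * the online check and the actual queueing.
	 */
	cpus_read_lock();
	if (cpu_online(cpu))
		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
	else
		ret = queue_delayed_work(wq, dwork, delay);
	cpus_read_unlock();

	return ret;
}

Unlike the trylock-based interface introduced by this patch, this
variant can sleep on cpus_read_lock(), so it is only suitable for
callers running in a context that may block.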
include/linux/workqueue.h | 3 ++
kernel/workqueue.c | 67 ++++++++++++++++++++++++++++++++++++---
2 files changed, 66 insertions(+), 4 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index b0dc957c3e560..57f39807f3bf1 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -589,6 +589,9 @@ extern bool queue_work_node(int node, struct workqueue_struct *wq,
struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *work, unsigned long delay);
+extern bool queue_delayed_work_on_offline_safe(int cpu,
+ struct workqueue_struct *wq, struct delayed_work *work,
+ unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8e0bb3c608239..f1b6320f675a6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2487,7 +2487,8 @@ void delayed_work_timer_fn(struct timer_list *t)
EXPORT_SYMBOL(delayed_work_timer_fn);
static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
- struct delayed_work *dwork, unsigned long delay)
+ struct delayed_work *dwork, unsigned long delay,
+ bool offline_safe)
{
struct timer_list *timer = &dwork->timer;
struct work_struct *work = &dwork->work;
@@ -2521,8 +2522,22 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
} else {
if (likely(cpu == WORK_CPU_UNBOUND))
add_timer_global(timer);
- else
+ else if (likely(!offline_safe))
add_timer_on(timer, cpu);
+ else { /* offline_safe */
+ if (unlikely(!cpu_online(cpu)))
+ /* cpu is already offline */
+ add_timer_global(timer);
+ else if (cpus_read_trylock()) {
+ if (likely(cpu_online(cpu)))
+ add_timer_on(timer, cpu);
+ else
+ add_timer_global(timer);
+ cpus_read_unlock();
+ } else
+ /* could not get hotplug lock */
+ add_timer_global(timer);
+ }
}
}
@@ -2549,7 +2564,7 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) &&
!clear_pending_if_disabled(work)) {
- __queue_delayed_work(cpu, wq, dwork, delay);
+ __queue_delayed_work(cpu, wq, dwork, delay, false);
ret = true;
}
@@ -2558,6 +2573,50 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
}
EXPORT_SYMBOL(queue_delayed_work_on);
+/**
+ * queue_delayed_work_on_offline_safe - queue work on a specific online CPU
+ * after a delay
+ *
+ * @cpu: CPU number to execute work on
+ * @wq: workqueue to use
+ * @dwork: work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Same as queue_delayed_work_on(), but checks that the specified @cpu
+ * is online. If @cpu is found to be offline, or if its online status
+ * can't be confirmed, @dwork is queued on any CPU instead.
+ * cpus_read_trylock() is used to ensure @cpu remains online, but we
+ * don't block if the hotplug lock can't be taken. So there is a good
+ * chance that, even though the specified @cpu was online, @dwork got
+ * queued on some other CPU because cpus_read_trylock() failed. On the
+ * plus side, this interface makes sure that @dwork never ends up on an
+ * offlined @cpu, where it could be lost forever.
+ *
+ * Return: %false if @work was already on a queue, %true otherwise. If
+ * @delay is zero and @dwork is idle, it will be scheduled for immediate
+ * execution.
+ */
+bool queue_delayed_work_on_offline_safe(int cpu, struct workqueue_struct *wq,
+ struct delayed_work *dwork, unsigned long delay)
+{
+ struct work_struct *work = &dwork->work;
+ bool ret = false;
+ unsigned long irq_flags;
+
+ /* read the comment in __queue_work() */
+ local_irq_save(irq_flags);
+
+ if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) &&
+ !clear_pending_if_disabled(work)) {
+ __queue_delayed_work(cpu, wq, dwork, delay, true);
+ ret = true;
+ }
+
+ local_irq_restore(irq_flags);
+ return ret;
+}
+EXPORT_SYMBOL(queue_delayed_work_on_offline_safe);
+
/**
* mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
* @cpu: CPU number to execute work on
@@ -2585,7 +2644,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
ret = work_grab_pending(&dwork->work, WORK_CANCEL_DELAYED, &irq_flags);
if (!clear_pending_if_disabled(&dwork->work))
- __queue_delayed_work(cpu, wq, dwork, delay);
+ __queue_delayed_work(cpu, wq, dwork, delay, false);
local_irq_restore(irq_flags);
return ret;
base-commit: 2b88851f583d3c4e40bcd40cfe1965241ec229dd
--
2.34.1