From 5fcc38d87b2cd8c05c5306c0140ccc076c5bf963 Mon Sep 17 00:00:00 2001
From: Marco Elver <elver@google.com>
Date: Wed, 28 Sep 2022 16:33:27 +0200
Subject: [PATCH 1/2] irq_work: Introduce irq_work_raw_sync()

Introduce a non-sleeping spinning variant of irq_work_sync(), called
irq_work_raw_sync(). Its usage is limited to contexts where interrupts
are disabled, and unlike irq_work_sync(), it may fail if the work is
pending on the current CPU.

Signed-off-by: Marco Elver <elver@google.com>
---
v2:
* New patch.
---
 include/linux/irq_work.h |  1 +
 kernel/irq_work.c        | 41 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 42 insertions(+)

diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 8cd11a223260..490adecbb4be 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -59,6 +59,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu);
 
 void irq_work_tick(void);
 void irq_work_sync(struct irq_work *work);
+bool irq_work_raw_sync(struct irq_work *work);
 
 #ifdef CONFIG_IRQ_WORK
 #include <asm/irq_work.h>
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 7afa40fe5cc4..b251b3437db1 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -290,6 +290,47 @@ void irq_work_sync(struct irq_work *work)
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
 
+/*
+ * Synchronize against the irq_work @work, ensuring the entry is not currently
+ * in use after returning true; returns false if it's impossible to synchronize
+ * due to being queued on the current CPU. Requires that interrupts are already
+ * disabled (prefer irq_work_sync() in all other cases).
+ */
+bool irq_work_raw_sync(struct irq_work *work)
+{
+	struct llist_node *head;
+	struct irq_work *entry;
+
+	/*
+	 * Interrupts should be disabled, so that we can be sure the current
+	 * CPU's work queues aren't concurrently run and cleared, with some of
+	 * their entries potentially becoming invalid in the iterations below.
+	 */
+	lockdep_assert_irqs_disabled();
+
+	while (irq_work_is_busy(work)) {
+		/*
+		 * It is only safe to wait if the work is not on this CPU's
+		 * work queues. Also beware of concurrent irq_work_queue_on(),
+		 * so keep re-checking this CPU's queues in this busy loop.
+		 */
+		head = READ_ONCE(this_cpu_ptr(&raised_list)->first);
+		llist_for_each_entry(entry, head, node.llist) {
+			if (entry == work)
+				return false;
+		}
+		head = READ_ONCE(this_cpu_ptr(&lazy_list)->first);
+		llist_for_each_entry(entry, head, node.llist) {
+			if (entry == work)
+				return false;
+		}
+		cpu_relax();
+	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(irq_work_raw_sync);
+
 static void run_irq_workd(unsigned int cpu)
 {
 	irq_work_run_list(this_cpu_ptr(&lazy_list));
-- 
2.37.3.998.g577e59143f-goog
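
[Editor's note] For context, the intended usage pattern is a teardown path
that runs with interrupts disabled and must not free an object while its
irq_work may still be in flight. The sketch below is a minimal illustration
and not part of the patch: struct my_object, its work member, and
try_free_object() are hypothetical names; only the irq_work API calls come
from the patch above.

	#include <linux/irq_work.h>
	#include <linux/slab.h>

	struct my_object {
		struct irq_work work;
		/* ... other fields ... */
	};

	/* Called with interrupts disabled; returns false if the caller must retry. */
	static bool try_free_object(struct my_object *obj)
	{
		/*
		 * If obj->work is queued on this CPU, spinning cannot make
		 * progress (the work only runs once interrupts are re-enabled),
		 * so irq_work_raw_sync() returns false and the teardown must
		 * be deferred rather than spun on.
		 */
		if (!irq_work_raw_sync(&obj->work))
			return false;

		/* The work is neither queued nor running anywhere: safe to free. */
		kfree(obj);
		return true;
	}

This is also why the function returns bool rather than spinning
unconditionally like irq_work_sync(): waiting for a work item queued on the
local CPU with interrupts disabled would deadlock.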