[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20200818112418.328209144@infradead.org>
Date: Tue, 18 Aug 2020 12:51:08 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: mingo@...nel.org, torvalds@...ux-foundation.org
Cc: linux-kernel@...r.kernel.org, will@...nel.org, paulmck@...nel.org,
hch@....de, axboe@...nel.dk, chris@...is-wilson.co.uk,
davem@...emloft.net, kuba@...nel.org, fweisbec@...il.com,
oleg@...hat.com, vincent.guittot@...aro.org, peterz@...radead.org
Subject: [RFC][PATCH v2 06/10] irq_work: Provide irq_work_queue_remote_static()
Provide the same horrible semantics provided by
smp_call_function_single_async(), doing so allows skipping a bunch of
atomic ops.
API-wise this is horrible crap as it relies on external serialization.
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
include/linux/irq_work.h | 3 ++-
kernel/irq_work.c | 21 ++++++++++++++++++++-
2 files changed, 22 insertions(+), 2 deletions(-)
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -19,7 +19,7 @@ struct irq_work {
};
#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){ \
- .node = { .u_flags = (_flags), }, \
+ .node = { .u_flags = CSD_TYPE_IRQ_WORK | (_flags), }, \
.func = (_func), \
}
@@ -68,6 +68,7 @@ static inline bool irq_work_needs_cpu(vo
#ifdef CONFIG_SMP
extern int irq_work_queue_remote(int cpu, struct irq_work *work);
+extern int irq_work_queue_remote_static(int cpu, struct irq_work *work);
extern void irq_work_single(void *arg);
#endif
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -29,7 +29,7 @@ static bool irq_work_claim(struct irq_wo
{
int oflags;
- oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
+ oflags = atomic_fetch_or(IRQ_WORK_CLAIMED, &work->node.a_flags);
/*
* If the work is already pending, no need to raise the IPI.
* The pairing smp_mb() in irq_work_single() makes sure
@@ -63,6 +63,9 @@ void irq_work_single(void *arg)
work->func(work);
lockdep_irq_work_exit(flags);
+ if (!(flags & IRQ_WORK_BUSY))
+ return;
+
/*
* Clear the BUSY bit, if set, and return to the free state if no-one
* else claimed it meanwhile.
@@ -108,6 +111,22 @@ int irq_work_queue_remote(int cpu, struc
return 0;
}
+
+int irq_work_queue_remote_static(int cpu, struct irq_work *work)
+{
+ /*
+ * Ensures preemption is disabled in the caller.
+ */
+ WARN_ON_ONCE(cpu == smp_processor_id());
+
+ if (work->node.u_flags & IRQ_WORK_PENDING)
+ return -EBUSY;
+
+ work->node.u_flags |= IRQ_WORK_PENDING;
+ __smp_call_single_queue(cpu, &work->node.llist);
+
+ return 0;
+}
#endif /* CONFIG_SMP */
Powered by blists - more mailing lists