Message-ID: <20200722153017.445914025@infradead.org>
Date: Wed, 22 Jul 2020 17:01:56 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: mingo@...nel.org, torvalds@...ux-foundation.org
Cc: linux-kernel@...r.kernel.org, will@...nel.org, paulmck@...nel.org,
hch@....de, axboe@...nel.dk, chris@...is-wilson.co.uk,
davem@...emloft.net, kuba@...nel.org, fweisbec@...il.com,
oleg@...hat.com, peterz@...radead.org
Subject: [RFC][PATCH 7/9] smp,irq_work: Use the new irq_work API
Convert the performance-sensitive users of
smp_call_function_single_async() over to the new
irq_work_queue_remote_static().

The new API is marginally less crap, but taking these users away allows
fixing up smp_call_function_single_async() without risk of performance
regressions.
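
For reference, the conversion pattern looks roughly like the below (a
minimal sketch only; struct foo, foo_work_func() and friends are made-up
names, while IRQ_WORK_INIT_HARD() and irq_work_queue_remote_static() are
the helpers introduced earlier in this series):

	#include <linux/irq_work.h>

	struct foo {
		struct irq_work work;	/* replaces call_single_data_t csd */
		/* ... payload ... */
	};

	/* irq_work callback; runs in hardirq (IPI) context on the target CPU */
	static void foo_work_func(struct irq_work *work)
	{
		struct foo *f = container_of(work, struct foo, work);
		/* ... handle f, as the old void *data callback did ... */
	}

	static void foo_init(struct foo *f)
	{
		/* replaces INIT_CSD(&f->csd, foo_func, f) */
		f->work = IRQ_WORK_INIT_HARD(foo_work_func);
	}

	static void foo_kick(struct foo *f, int cpu)
	{
		/* replaces smp_call_function_single_async(cpu, &f->csd) */
		irq_work_queue_remote_static(cpu, &f->work);
	}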
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
block/blk-mq.c | 8 ++++----
include/linux/blkdev.h | 4 ++--
include/linux/netdevice.h | 3 ++-
kernel/sched/core.c | 16 ++++++++--------
kernel/sched/fair.c | 6 +++---
kernel/sched/sched.h | 4 ++--
net/core/dev.c | 15 ++++++++++-----
7 files changed, 31 insertions(+), 25 deletions(-)
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -623,9 +623,9 @@ static int blk_softirq_cpu_dead(unsigned
}
-static void __blk_mq_complete_request_remote(void *data)
+static void __blk_mq_complete_request_remote(struct irq_work *work)
{
- struct request *rq = data;
+ struct request *rq = container_of(work, struct request, work);
/*
* For most of single queue controllers, there is only one irq vector
@@ -672,8 +672,8 @@ bool blk_mq_complete_request_remote(stru
return false;
if (blk_mq_complete_need_ipi(rq)) {
- INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
- smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
+ rq->work = IRQ_WORK_INIT_HARD(__blk_mq_complete_request_remote);
+ irq_work_queue_remote_static(rq->mq_ctx->cpu, &rq->work);
} else {
if (rq->q->nr_hw_queues > 1)
return false;
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -19,7 +19,7 @@
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
-#include <linux/smp.h>
+#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
@@ -234,7 +234,7 @@ struct request {
unsigned long deadline;
union {
- struct __call_single_data csd;
+ struct irq_work work;
u64 fifo_time;
};
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
+#include <linux/irq_work.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
@@ -3126,7 +3127,7 @@ struct softnet_data {
unsigned int input_queue_head ____cacheline_aligned_in_smp;
/* Elements below can be accessed between CPUs for RPS/RFS */
- call_single_data_t csd ____cacheline_aligned_in_smp;
+ struct irq_work work ____cacheline_aligned_in_smp;
struct softnet_data *rps_ipi_next;
unsigned int cpu;
unsigned int input_queue_tail;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -265,9 +265,9 @@ static void __hrtick_restart(struct rq *
/*
* called from hardirq (IPI) context
*/
-static void __hrtick_start(void *arg)
+static void __hrtick_start(struct irq_work *work)
{
- struct rq *rq = arg;
+ struct rq *rq = container_of(work, struct rq, hrtick_work);
struct rq_flags rf;
rq_lock(rq, &rf);
@@ -298,7 +298,7 @@ void hrtick_start(struct rq *rq, u64 del
if (rq == this_rq())
__hrtick_restart(rq);
else
- smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
+ irq_work_queue_remote_static(cpu_of(rq), &rq->hrtick_work);
}
#else
@@ -323,7 +323,7 @@ void hrtick_start(struct rq *rq, u64 del
static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
- INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
+ rq->hrtick_work = IRQ_WORK_INIT_HARD(__hrtick_start);
#endif
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
rq->hrtick_timer.function = hrtick;
@@ -633,14 +633,14 @@ void wake_up_nohz_cpu(int cpu)
wake_up_idle_cpu(cpu);
}
-static void nohz_csd_func(void *info)
+static void nohz_work_func(struct irq_work *work)
{
- struct rq *rq = info;
+ struct rq *rq = container_of(work, struct rq, nohz_work);
int cpu = cpu_of(rq);
unsigned int flags;
/*
- * Release the rq::nohz_csd.
+ * Release rq::nohz_work.
*/
flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
WARN_ON(!(flags & NOHZ_KICK_MASK));
@@ -6827,7 +6827,7 @@ void __init sched_init(void)
rq->last_blocked_load_update_tick = jiffies;
atomic_set(&rq->nohz_flags, 0);
- INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
+ rq->nohz_work = IRQ_WORK_INIT_HARD(nohz_work_func);
#endif
#endif /* CONFIG_SMP */
hrtick_rq_init(rq);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10039,8 +10039,8 @@ static void kick_ilb(unsigned int flags)
return;
/*
- * Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets
- * the first flag owns it; cleared by nohz_csd_func().
+ * Access to rq::nohz_work is serialized by NOHZ_KICK_MASK; he who sets
+ * the first flag owns it; cleared by nohz_work_func().
*/
flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu));
if (flags & NOHZ_KICK_MASK)
@@ -10051,7 +10051,7 @@ static void kick_ilb(unsigned int flags)
* is idle. And the softirq performing nohz idle load balance
* will be run before returning from the IPI.
*/
- smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd);
+ irq_work_queue_remote_static(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_work);
}
/*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -904,7 +904,7 @@ struct rq {
#ifdef CONFIG_SMP
unsigned long last_blocked_load_update_tick;
unsigned int has_blocked_load;
- call_single_data_t nohz_csd;
+ struct irq_work nohz_work;
#endif /* CONFIG_SMP */
unsigned int nohz_tick_stopped;
atomic_t nohz_flags;
@@ -1015,7 +1015,7 @@ struct rq {
#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
- call_single_data_t hrtick_csd;
+ struct irq_work hrtick_work;
#endif
struct hrtimer hrtick_timer;
#endif
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4444,9 +4444,9 @@ EXPORT_SYMBOL(rps_may_expire_flow);
#endif /* CONFIG_RFS_ACCEL */
/* Called from hardirq (IPI) context */
-static void rps_trigger_softirq(void *data)
+static void rps_trigger_softirq(struct irq_work *work)
{
- struct softnet_data *sd = data;
+ struct softnet_data *sd = container_of(work, struct softnet_data, work);
____napi_schedule(sd, &sd->backlog);
sd->received_rps++;
@@ -6185,8 +6185,13 @@ static void net_rps_send_ipi(struct soft
while (remsd) {
struct softnet_data *next = remsd->rps_ipi_next;
- if (cpu_online(remsd->cpu))
- smp_call_function_single_async(remsd->cpu, &remsd->csd);
+ if (cpu_online(remsd->cpu)) {
+ /*
+ * XXX can there be two CPUs calling into the same remsd?
+ * XXX serialized by NAPI_STATE_SCHED ??
+ */
+ irq_work_queue_remote_static(remsd->cpu, &remsd->work);
+ }
remsd = next;
}
#endif
@@ -10661,7 +10666,7 @@ static int __init net_dev_init(void)
INIT_LIST_HEAD(&sd->poll_list);
sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
- INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
+ init_irq_work(&sd->work, rps_trigger_softirq);
sd->cpu = i;
#endif