Message-ID: <20201027205806.s7qau5xf4lzuslcu@linutronix.de>
Date: Tue, 27 Oct 2020 21:58:06 +0100
From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To: Christoph Hellwig <hch@...radead.org>
Cc: Thomas Gleixner <tglx@...utronix.de>,
David Runge <dave@...epmap.de>, linux-rt-users@...r.kernel.org,
Jens Axboe <axboe@...nel.dk>, linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org,
Peter Zijlstra <peterz@...radead.org>,
Daniel Wagner <dwagner@...e.de>
Subject: Re: [PATCH RFC] blk-mq: Don't IPI requests on PREEMPT_RT
On 2020-10-27 17:23:09 [+0000], Christoph Hellwig wrote:
> Ok. I was hoping we could hide this in core code somehow, especially
> as peterz didn't like the use of smp_call_function_single_async in the
> blk-mq completion code very much.
No idea how you could efficiently avoid smp_call_function_single_async().
The alternatives are:
- a workqueue (okay)
- a timer_list timer armed to expire immediately. That is more code, plus
  it may still delay the completion by up to one tick.
> Sebastian, would this solve your preempt-rt and lockdep issues?
The problem with that is that on RT/force-threaded interrupts it will
always wake the ksoftirqd thread and complete the request there. Those
are extra steps which should probably be avoided.
Now.
The hunk in blk_mq_complete_need_ipi() avoids waking a thread when
force-threaded interrupts are enabled.
The remaining part is the switch to llist, which avoids the locking (IRQ
off/on) and raises the IPI/softirq only if a request was added to an
empty list. The entries are now processed in reverse order, but that
shouldn't matter, right?
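
To make that concrete, here is a minimal userspace sketch (plain C11
atomics, not the kernel's llist implementation; the model_* helpers exist
only for this illustration). It shows the two properties relied on above:
the add operation reports whether the list was empty before, so only the
first completion needs to signal the softirq/IPI, and detaching the whole
list returns the entries in reverse (LIFO) insertion order.

/*
 * Userspace model of the lock-free per-CPU completion list. Push returns
 * true only when the list was empty before, and del_all detaches the
 * whole list with a single exchange.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next;
	int id;			/* stands in for the request */
};

static _Atomic(struct node *) list_head;

/* Model of llist_add(): return true if the list was empty before. */
static bool model_llist_add(struct node *n)
{
	struct node *first = atomic_load(&list_head);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(&list_head, &first, n));

	return first == NULL;
}

/* Model of llist_del_all(): detach every entry in one exchange. */
static struct node *model_llist_del_all(void)
{
	return atomic_exchange(&list_head, (struct node *)NULL);
}

int main(void)
{
	struct node reqs[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };

	for (int i = 0; i < 3; i++) {
		/* Only the first add would raise BLOCK_SOFTIRQ / send the IPI. */
		if (model_llist_add(&reqs[i]))
			printf("request %d: list was empty -> signal\n", reqs[i].id);
		else
			printf("request %d: already pending -> no signal\n", reqs[i].id);
	}

	/* The "softirq handler": entries come back in reverse order. */
	for (struct node *n = model_llist_del_all(); n; n = n->next)
		printf("completing request %d\n", n->id);

	return 0;
}

Running it prints the "signal" line for the first request only and then
completes the requests as 3, 2, 1.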
I would split this into two patches (the blk_mq_complete_need_ipi() hunk
and the llist part) unless there are objections.
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 55bcee5dc0320..d2452ee9b0e2c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -41,7 +41,7 @@
 #include "blk-mq-sched.h"
 #include "blk-rq-qos.h"
 
-static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
@@ -565,80 +565,31 @@ void blk_mq_end_request(struct request *rq, blk_status_t error)
 }
 EXPORT_SYMBOL(blk_mq_end_request);
 
-/*
- * Softirq action handler - move entries to local list and loop over them
- * while passing them to the queue registered handler.
- */
-static __latent_entropy void blk_done_softirq(struct softirq_action *h)
+static void blk_complete_reqs(struct llist_head *cpu_list)
 {
-	struct list_head *cpu_list, local_list;
+	struct llist_node *entry;
+	struct request *rq, *rq_next;
 
-	local_irq_disable();
-	cpu_list = this_cpu_ptr(&blk_cpu_done);
-	list_replace_init(cpu_list, &local_list);
-	local_irq_enable();
+	entry = llist_del_all(cpu_list);
 
-	while (!list_empty(&local_list)) {
-		struct request *rq;
-
-		rq = list_entry(local_list.next, struct request, ipi_list);
-		list_del_init(&rq->ipi_list);
+	llist_for_each_entry_safe(rq, rq_next, entry, ipi_list)
 		rq->q->mq_ops->complete(rq);
-	}
 }
 
-static void blk_mq_trigger_softirq(struct request *rq)
+static __latent_entropy void blk_done_softirq(struct softirq_action *h)
 {
-	struct list_head *list;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	list = this_cpu_ptr(&blk_cpu_done);
-	list_add_tail(&rq->ipi_list, list);
-
-	/*
-	 * If the list only contains our just added request, signal a raise of
-	 * the softirq. If there are already entries there, someone already
-	 * raised the irq but it hasn't run yet.
-	 */
-	if (list->next == &rq->ipi_list)
-		raise_softirq_irqoff(BLOCK_SOFTIRQ);
-	local_irq_restore(flags);
+	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
 }
 
 static int blk_softirq_cpu_dead(unsigned int cpu)
 {
-	/*
-	 * If a CPU goes away, splice its entries to the current CPU
-	 * and trigger a run of the softirq
-	 */
-	local_irq_disable();
-	list_splice_init(&per_cpu(blk_cpu_done, cpu),
-			 this_cpu_ptr(&blk_cpu_done));
-	raise_softirq_irqoff(BLOCK_SOFTIRQ);
-	local_irq_enable();
-
+	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
 	return 0;
 }
 
-
 static void __blk_mq_complete_request_remote(void *data)
 {
-	struct request *rq = data;
-
-	/*
-	 * For most of single queue controllers, there is only one irq vector
-	 * for handling I/O completion, and the only irq's affinity is set
-	 * to all possible CPUs. On most of ARCHs, this affinity means the irq
-	 * is handled on one specific CPU.
-	 *
-	 * So complete I/O requests in softirq context in case of single queue
-	 * devices to avoid degrading I/O performance due to irqsoff latency.
-	 */
-	if (rq->q->nr_hw_queues == 1)
-		blk_mq_trigger_softirq(rq);
-	else
-		rq->q->mq_ops->complete(rq);
+	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
 }
 
 static inline bool blk_mq_complete_need_ipi(struct request *rq)
@@ -648,6 +599,14 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
 	if (!IS_ENABLED(CONFIG_SMP) ||
 	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
 		return false;
+	/*
+	 * With force threaded interrupts enabled, raising softirq from an SMP
+	 * function call will always result in waking the ksoftirqd thread.
+	 * This is probably worse than completing the request on a different
+	 * cache domain.
+	 */
+	if (force_irqthreads)
+		return false;
 
 	/* same CPU or cache domain? Complete locally */
 	if (cpu == rq->mq_ctx->cpu ||
@@ -661,6 +620,7 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
 
 bool blk_mq_complete_request_remote(struct request *rq)
 {
+	struct llist_head *cpu_list;
 	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
 
 	/*
@@ -671,14 +631,21 @@ bool blk_mq_complete_request_remote(struct request *rq)
 		return false;
 
 	if (blk_mq_complete_need_ipi(rq)) {
-		rq->csd.func = __blk_mq_complete_request_remote;
-		rq->csd.info = rq;
-		rq->csd.flags = 0;
-		smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
+		unsigned int cpu;
+
+		cpu = rq->mq_ctx->cpu;
+		cpu_list = &per_cpu(blk_cpu_done, cpu);
+		if (llist_add(&rq->ipi_list, cpu_list)) {
+			rq->csd.func = __blk_mq_complete_request_remote;
+			rq->csd.flags = 0;
+			smp_call_function_single_async(cpu, &rq->csd);
+		}
 	} else {
 		if (rq->q->nr_hw_queues > 1)
 			return false;
-		blk_mq_trigger_softirq(rq);
+		cpu_list = this_cpu_ptr(&blk_cpu_done);
+		if (llist_add(&rq->ipi_list, cpu_list))
+			raise_softirq(BLOCK_SOFTIRQ);
 	}
 
 	return true;
@@ -3909,7 +3876,7 @@ static int __init blk_mq_init(void)
 	int i;
 
 	for_each_possible_cpu(i)
-		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+		init_llist_head(&per_cpu(blk_cpu_done, i));
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
 
 	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 639cae2c158b5..331b2b675b417 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -156,7 +156,7 @@ struct request {
 	 */
 	union {
 		struct hlist_node hash;	/* merge hash */
-		struct list_head ipi_list;
+		struct llist_node ipi_list;
 	};
 
 	/*