Message-Id: <1423988345-4005-11-git-send-email-bob.liu@oracle.com>
Date: Sun, 15 Feb 2015 16:19:05 +0800
From: Bob Liu <bob.liu@...cle.com>
To: xen-devel@...ts.xen.org
Cc: david.vrabel@...rix.com, linux-kernel@...r.kernel.org,
roger.pau@...rix.com, konrad.wilk@...cle.com,
felipe.franciosi@...rix.com, axboe@...com, hch@...radead.org,
avanzini.arianna@...il.com, Bob Liu <bob.liu@...cle.com>
Subject: [PATCH 10/10] xen/blkfront: use a work queue to speed up blkif interrupt return
Move the request-completion logic out of blkif_interrupt() and into a work
queue. The io_lock is then no longer taken from hard interrupt context, so
'spin_lock_irq' can be replaced with plain 'spin_lock', and interrupts are
no longer left disabled for long stretches in blk_mq_queue_rq().

This gets rid of warnings like the following:
INFO: rcu_sched detected stalls on CPUs/tasks: { 7} (detected by 0,
t=15002 jiffies, g=1018, c=1017, q=0)
Task dump for CPU 7:
swapper/7 R running task 0 0 1 0x00080000
ffff88028f4edf50 0000000000000086 ffff88028f4ee330 ffff880283df3e18
ffffffff8108836a 0000000183f75438 0000000000000040 000000000000df50
0000008bde2dd600 ffff88028f4ee330 0000000000000086 ffff880283f75038
Call Trace:
[<ffffffff8108836a>] ? __hrtimer_start_range_ns+0x269/0x27b
[<ffffffff8108838f>] ? hrtimer_start+0x13/0x15
[<ffffffff81085298>] ? rcu_eqs_enter+0x66/0x79
[<ffffffff81013847>] ? default_idle+0x9/0xd
[<ffffffff81013f2d>] ? arch_cpu_idle+0xa/0xc
[<ffffffff810746ad>] ? cpu_startup_entry+0x118/0x253
[<ffffffff81030f57>] ? start_secondary+0x12e/0x132
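
For readers who just want the shape of the change, below is a minimal,
self-contained sketch of the pattern this patch applies. The names here
(ring_ctx, done_work_fn, ring_interrupt) are illustrative only, not the
actual xen-blkfront code: the hard interrupt handler merely schedules a
work item, and the completion logic runs later in process context, where
a plain spin_lock() suffices because the lock is no longer taken from
interrupt context.

/*
 * Illustrative sketch (hypothetical names, not the xen-blkfront code):
 * defer completion handling from the hard IRQ handler to a work item,
 * so the lock involved is only ever taken in process context.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct ring_ctx {
	spinlock_t lock;		/* protects the ring state */
	struct work_struct done_work;	/* deferred completion handling */
};

/* Runs later in process context, via the system workqueue. */
static void done_work_fn(struct work_struct *work)
{
	struct ring_ctx *ctx = container_of(work, struct ring_ctx, done_work);

	/*
	 * A plain spin_lock() is enough here: the IRQ handler itself
	 * never takes this lock, it only schedules the work item.
	 */
	spin_lock(&ctx->lock);
	/* ... consume ring responses and complete finished requests ... */
	spin_unlock(&ctx->lock);
}

/* Hard interrupt handler: defer the real work and return quickly. */
static irqreturn_t ring_interrupt(int irq, void *dev_id)
{
	struct ring_ctx *ctx = dev_id;

	schedule_work(&ctx->done_work);
	return IRQ_HANDLED;
}

static void ring_ctx_init(struct ring_ctx *ctx)
{
	spin_lock_init(&ctx->lock);
	INIT_WORK(&ctx->done_work, done_work_fn);
}

As with any such deferral, the cost is a little extra completion latency
in exchange for much shorter interrupts-off sections.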
Signed-off-by: Bob Liu <bob.liu@...cle.com>
---
drivers/block/xen-blkfront.c | 47 ++++++++++++++++++++++++++------------------
1 file changed, 28 insertions(+), 19 deletions(-)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 32caf85..bdd9a15 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -116,6 +116,7 @@ struct blkfront_ring_info {
 	struct blkif_front_ring ring;
 	unsigned int evtchn, irq;
 	struct work_struct work;
+	struct work_struct done_work;
 	struct gnttab_free_callback callback;
 	struct blk_shadow shadow[BLK_RING_SIZE];
 	struct list_head grants;
@@ -630,29 +631,29 @@ static int blk_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	int ret = BLK_MQ_RQ_QUEUE_OK;
 
 	blk_mq_start_request(qd->rq);
-	spin_lock_irq(&rinfo->io_lock);
+	spin_lock(&rinfo->io_lock);
 	if (RING_FULL(&rinfo->ring)) {
-		spin_unlock_irq(&rinfo->io_lock);
+		spin_unlock(&rinfo->io_lock);
 		blk_mq_stop_hw_queue(hctx);
 		ret = BLK_MQ_RQ_QUEUE_BUSY;
 		goto out;
 	}
 
 	if (blkif_request_flush_invalid(qd->rq, rinfo->info)) {
-		spin_unlock_irq(&rinfo->io_lock);
+		spin_unlock(&rinfo->io_lock);
 		ret = BLK_MQ_RQ_QUEUE_ERROR;
 		goto out;
 	}
 
 	if (blkif_queue_request(qd->rq, rinfo)) {
-		spin_unlock_irq(&rinfo->io_lock);
+		spin_unlock(&rinfo->io_lock);
 		blk_mq_stop_hw_queue(hctx);
 		ret = BLK_MQ_RQ_QUEUE_BUSY;
 		goto out;
 	}
 
 	flush_requests(rinfo);
-	spin_unlock_irq(&rinfo->io_lock);
+	spin_unlock(&rinfo->io_lock);
 out:
 	return ret;
 }
@@ -937,6 +938,7 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
 
 		/* Flush gnttab callback work. Must be done with no locks held. */
 		flush_work(&rinfo->work);
+		flush_work(&rinfo->done_work);
 	}
 
 	del_gendisk(info->gd);
@@ -955,15 +957,13 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
 
 static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&rinfo->io_lock, flags);
+	spin_lock(&rinfo->io_lock);
 	if (!RING_FULL(&rinfo->ring)) {
-		spin_unlock_irqrestore(&rinfo->io_lock, flags);
+		spin_unlock(&rinfo->io_lock);
 		blk_mq_start_stopped_hw_queues(rinfo->info->rq, true);
 		return;
 	}
-	spin_unlock_irqrestore(&rinfo->io_lock, flags);
+	spin_unlock(&rinfo->io_lock);
 }
 
 static void blkif_restart_queue(struct work_struct *work)
@@ -1070,6 +1070,7 @@ free_shadow:
 
 	/* Flush gnttab callback work. Must be done with no locks held. */
 	flush_work(&rinfo->work);
+	flush_work(&rinfo->done_work);
 
 	/* Free resources associated with old device channel. */
 	if (rinfo->ring_ref != GRANT_INVALID_REF) {
@@ -1168,19 +1169,15 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_ring_info *ri
 	}
 }
 
-static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+static void blkif_done_req(struct work_struct *work)
 {
+	struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, done_work);
 	struct request *req;
 	struct blkif_response *bret;
 	RING_IDX i, rp;
-	unsigned long flags;
-	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
 	struct blkfront_info *info = rinfo->info;
 
-	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
-		return IRQ_HANDLED;
-
-	spin_lock_irqsave(&rinfo->io_lock, flags);
+	spin_lock(&rinfo->io_lock);
  again:
 	rp = rinfo->ring.sring->rsp_prod;
 	rmb(); /* Ensure we see queued responses up to 'rp'. */
@@ -1271,9 +1268,20 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 	} else
 		rinfo->ring.sring->rsp_event = i + 1;
 
-	spin_unlock_irqrestore(&rinfo->io_lock, flags);
-	kick_pending_request_queues(rinfo);
+	if (!RING_FULL(&rinfo->ring))
+		blk_mq_start_stopped_hw_queues(rinfo->info->rq, true);
+	spin_unlock(&rinfo->io_lock);
+}
+
+static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+{
+	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
+	struct blkfront_info *info = rinfo->info;
+
+	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
+		return IRQ_HANDLED;
+	schedule_work(&rinfo->done_work);
 
 	return IRQ_HANDLED;
 }
 
@@ -1535,6 +1543,7 @@ static int blkfront_probe(struct xenbus_device *dev,
 	rinfo->persistent_gnts_c = 0;
 	rinfo->info = info;
 	INIT_WORK(&rinfo->work, blkif_restart_queue);
+	INIT_WORK(&rinfo->done_work, blkif_done_req);
 
 	for (i = 0; i < BLK_RING_SIZE; i++)
 		rinfo->shadow[i].req.u.rw.id = i+1;
--
1.8.3.1