Message-Id: <1423988345-4005-4-git-send-email-bob.liu@oracle.com>
Date: Sun, 15 Feb 2015 16:18:58 +0800
From: Bob Liu <bob.liu@...cle.com>
To: xen-devel@...ts.xen.org
Cc: david.vrabel@...rix.com, linux-kernel@...r.kernel.org,
roger.pau@...rix.com, konrad.wilk@...cle.com,
felipe.franciosi@...rix.com, axboe@...com, hch@...radead.org,
avanzini.arianna@...il.com, Bob Liu <bob.liu@...cle.com>
Subject: [PATCH 03/10] xen/blkfront: reorg info->io_lock after using blk-mq API

Drop the unnecessary holding of info->io_lock when calling into the blk-mq
API (e.g. blk_mq_stop_hw_queue() and blk_mq_start_stopped_hw_queues()).
The lock is now only taken around accesses to the shared ring and
blkfront's own state.

Signed-off-by: Bob Liu <bob.liu@...cle.com>
---
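A minimal sketch of the locking pattern this patch moves to, kept here in
the notes area rather than in the diff; the structure and function names
example_info and example_queue_rq are illustrative only, while the blk-mq
and Xen ring calls are real:

/* Take io_lock only around shared-ring accesses; drop it before
 * calling blk-mq helpers, which do their own locking. */
static int example_queue_rq(struct example_info *info,
			    struct blk_mq_hw_ctx *hctx,
			    struct request *rq)
{
	blk_mq_start_request(rq);

	spin_lock_irq(&info->io_lock);
	if (RING_FULL(&info->ring)) {
		/* Unlock before touching blk-mq queue state. */
		spin_unlock_irq(&info->io_lock);
		blk_mq_stop_hw_queue(hctx);
		return BLK_MQ_RQ_QUEUE_BUSY;
	}
	/* ... place the request on the shared ring ... */
	spin_unlock_irq(&info->io_lock);

	return BLK_MQ_RQ_QUEUE_OK;
}

This mirrors what the blk_mq_queue_rq() hunk below does.
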
 drivers/block/xen-blkfront.c | 38 ++++++++++++++++----------------------
 1 file changed, 16 insertions(+), 22 deletions(-)

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3589436..5a90a51 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -614,25 +614,28 @@ static int blk_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_mq_start_request(qd->rq);
 	spin_lock_irq(&info->io_lock);
 	if (RING_FULL(&info->ring)) {
+		spin_unlock_irq(&info->io_lock);
 		blk_mq_stop_hw_queue(hctx);
 		ret = BLK_MQ_RQ_QUEUE_BUSY;
 		goto out;
 	}
 
 	if (blkif_request_flush_invalid(qd->rq, info)) {
+		spin_unlock_irq(&info->io_lock);
 		ret = BLK_MQ_RQ_QUEUE_ERROR;
 		goto out;
 	}
 
 	if (blkif_queue_request(qd->rq)) {
+		spin_unlock_irq(&info->io_lock);
 		blk_mq_stop_hw_queue(hctx);
 		ret = BLK_MQ_RQ_QUEUE_BUSY;
 		goto out;
 	}
 
 	flush_requests(info);
-out:
 	spin_unlock_irq(&info->io_lock);
+out:
 	return ret;
 }
 
@@ -891,19 +894,15 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 static void xlvbd_release_gendisk(struct blkfront_info *info)
 {
 	unsigned int minor, nr_minors;
-	unsigned long flags;
 
 	if (info->rq == NULL)
 		return;
 
-	spin_lock_irqsave(&info->io_lock, flags);
-
 	/* No more blkif_request(). */
 	blk_mq_stop_hw_queues(info->rq);
 
 	/* No more gnttab callback work. */
 	gnttab_cancel_free_callback(&info->callback);
-	spin_unlock_irqrestore(&info->io_lock, flags);
 
 	/* Flush gnttab callback work. Must be done with no locks held. */
 	flush_work(&info->work);
@@ -922,21 +921,25 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
 	info->gd = NULL;
 }
 
-/* Called with info->io_lock holded */
 static void kick_pending_request_queues(struct blkfront_info *info)
 {
-	if (!RING_FULL(&info->ring))
+	unsigned long flags;
+
+	spin_lock_irqsave(&info->io_lock, flags);
+	if (!RING_FULL(&info->ring)) {
+		spin_unlock_irqrestore(&info->io_lock, flags);
 		blk_mq_start_stopped_hw_queues(info->rq, true);
+		return;
+	}
+	spin_unlock_irqrestore(&info->io_lock, flags);
 }
 
 static void blkif_restart_queue(struct work_struct *work)
 {
 	struct blkfront_info *info = container_of(work, struct blkfront_info, work);
 
-	spin_lock_irq(&info->io_lock);
 	if (info->connected == BLKIF_STATE_CONNECTED)
 		kick_pending_request_queues(info);
-	spin_unlock_irq(&info->io_lock);
 }
 
 static void blkif_free(struct blkfront_info *info, int suspend)
@@ -946,13 +949,13 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 	int i, j, segs;
 
 	/* Prevent new requests being issued until we fix things up. */
-	spin_lock_irq(&info->io_lock);
 	info->connected = suspend ?
 		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
 	/* No more blkif_request(). */
 	if (info->rq)
 		blk_mq_stop_hw_queues(info->rq);
 
+	spin_lock_irq(&info->io_lock);
 	/* Remove all persistent grants */
 	if (!list_empty(&info->grants)) {
 		list_for_each_entry_safe(persistent_gnt, n,
@@ -1136,13 +1139,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 	unsigned long flags;
 	struct blkfront_info *info = (struct blkfront_info *)dev_id;
 
-	spin_lock_irqsave(&info->io_lock, flags);
-
-	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
-		spin_unlock_irqrestore(&info->io_lock, flags);
+	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
 		return IRQ_HANDLED;
-	}
 
+	spin_lock_irqsave(&info->io_lock, flags);
  again:
 	rp = info->ring.sring->rsp_prod;
 	rmb(); /* Ensure we see queued responses up to 'rp'. */
@@ -1233,9 +1233,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 	} else
 		info->ring.sring->rsp_event = i + 1;
 
-	kick_pending_request_queues(info);
-
 	spin_unlock_irqrestore(&info->io_lock, flags);
+	kick_pending_request_queues(info);
 
 	return IRQ_HANDLED;
 }
@@ -1518,8 +1517,6 @@ static int blkif_recover(struct blkfront_info *info)
 
 	xenbus_switch_state(info->xbdev, XenbusStateConnected);
 
-	spin_lock_irq(&info->io_lock);
-
 	/* Now safe for us to use the shared ring */
 	info->connected = BLKIF_STATE_CONNECTED;
 
@@ -1533,7 +1530,6 @@ static int blkif_recover(struct blkfront_info *info)
 		blk_mq_requeue_request(req);
 	}
 	blk_mq_kick_requeue_list(info->rq);
-	spin_unlock_irq(&info->io_lock);
 
 	while ((bio = bio_list_pop(&bio_list)) != NULL) {
 		/* Traverse the list of pending bios and re-queue them */
@@ -1863,10 +1859,8 @@ static void blkfront_connect(struct blkfront_info *info)
 	xenbus_switch_state(info->xbdev, XenbusStateConnected);
 
 	/* Kick pending requests. */
-	spin_lock_irq(&info->io_lock);
 	info->connected = BLKIF_STATE_CONNECTED;
 	kick_pending_request_queues(info);
-	spin_unlock_irq(&info->io_lock);
 
 	add_disk(info->gd);
 
--
1.8.3.1