Message-ID: <YPVK1SPI0ZWmkOgd@infradead.org>
Date: Mon, 19 Jul 2021 10:50:13 +0100
From: Christoph Hellwig <hch@...radead.org>
To: Luis Chamberlain <mcgrof@...nel.org>
Cc: Christoph Hellwig <hch@...radead.org>, axboe@...nel.dk,
hare@...e.de, bvanassche@....org, ming.lei@...hat.com,
jack@...e.cz, osandov@...com, linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 6/6] block: skip queue if NULL on blk_cleanup_queue()
On Thu, Jul 15, 2021 at 12:07:26PM -0700, Luis Chamberlain wrote:
> > For all drivers using blk_alloc_disk/blk_mq_alloc_disk there should
> > always be a queue. The other ones aren't ready to handle errors
> > from add_disk yet in any way, I think (and I plan to fix this up
> > ASAP).
>
> Have an example in mind?
The only ones left are nvme, dasd and scsi.  NVMe is trivial (patch
attached below).  dasd needs a little more work; I need to send a WIP
to the maintainers.  scsi is the real problem and will require a fair
amount of work.
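
To illustrate (this is a sketch, not part of the patch below): with
blk_mq_alloc_disk() the gendisk and its request_queue are allocated
together, so a converted driver can never observe a disk without a
queue.  A minimal sketch of the pattern, assuming a tag set "set" and
driver-private data "data":

	struct gendisk *disk;

	/* allocates the gendisk and its request_queue in one call */
	disk = blk_mq_alloc_disk(set, data);
	if (IS_ERR(disk))
		return PTR_ERR(disk);	/* nothing else to unwind */

	/* disk->queue is guaranteed to be valid from here on */

	/* on teardown, a single call releases both queue and disk */
	blk_cleanup_disk(disk);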
---
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 11779be42186..35da956acef6 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3726,9 +3726,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 	if (!ns)
 		goto out_free_id;
 
-	ns->queue = blk_mq_init_queue(ctrl->tagset);
-	if (IS_ERR(ns->queue))
+	disk = blk_mq_alloc_disk(ctrl->tagset, ns);
+	if (IS_ERR(disk))
 		goto out_free_ns;
+	disk->fops = &nvme_bdev_ops;
+	disk->private_data = ns;
+
+	ns->disk = disk;
+	ns->queue = disk->queue;
 
 	if (ctrl->opts && ctrl->opts->data_digest)
 		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
@@ -3737,20 +3742,12 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 	if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
 		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
 
-	ns->queue->queuedata = ns;
 	ns->ctrl = ctrl;
 	kref_init(&ns->kref);
 
 	if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
-		goto out_free_queue;
-
-	disk = alloc_disk_node(0, node);
-	if (!disk)
-		goto out_unlink_ns;
+		goto out_cleanup_disk;
 
-	disk->fops = &nvme_bdev_ops;
-	disk->private_data = ns;
-	disk->queue = ns->queue;
 	/*
 	 * Without the multipath code enabled, multiple controller per
 	 * subsystems are visible as devices and thus we cannot use the
@@ -3759,15 +3756,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 	if (!nvme_mpath_set_disk_name(ns, disk->disk_name, &disk->flags))
 		sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
 			ns->head->instance);
-	ns->disk = disk;
 
 	if (nvme_update_ns_info(ns, id))
-		goto out_put_disk;
+		goto out_unlink_ns;
 
 	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
 		if (nvme_nvm_register(ns, disk->disk_name, node)) {
 			dev_warn(ctrl->device, "LightNVM init failure\n");
-			goto out_put_disk;
+			goto out_unlink_ns;
 		}
 	}
 
@@ -3786,10 +3782,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 	kfree(id);
 
 	return;
- out_put_disk:
-	/* prevent double queue cleanup */
-	ns->disk->queue = NULL;
-	put_disk(ns->disk);
+
  out_unlink_ns:
 	mutex_lock(&ctrl->subsys->lock);
 	list_del_rcu(&ns->siblings);
@@ -3797,8 +3790,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 		list_del_init(&ns->head->entry);
 	mutex_unlock(&ctrl->subsys->lock);
 	nvme_put_ns_head(ns->head);
- out_free_queue:
-	blk_cleanup_queue(ns->queue);
+ out_cleanup_disk:
+	blk_cleanup_disk(disk);
  out_free_ns:
 	kfree(ns);
  out_free_id:
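
The error unwinding also becomes simpler: blk_cleanup_disk() replaces
the separate blk_cleanup_queue()/put_disk() pair, which is what made
the "prevent double queue cleanup" hack necessary in the first place.
A sketch of the two teardown paths, identifiers as in the patch above:

	/* before: queue and disk released separately; the queue
	 * pointer had to be cleared so that put_disk() would not
	 * drop the queue reference a second time */
	ns->disk->queue = NULL;
	put_disk(ns->disk);
	...
	blk_cleanup_queue(ns->queue);

	/* after: one call tears down both the queue and the gendisk */
	blk_cleanup_disk(disk);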