diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index ee48ac5..8dc5d36 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -178,6 +178,9 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		nvmeq->hctx = hctx;
 	else
 		WARN_ON(nvmeq->hctx->tags != hctx->tags);
+
+	irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+				hctx->cpumask);
 	hctx->driver_data = nvmeq;
 	return 0;
 }
@@ -581,6 +584,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 	enum dma_data_direction dma_dir;
 	int psegs = req->nr_phys_segments;
 	int result = BLK_MQ_RQ_QUEUE_BUSY;
+
 	/*
 	 * Requeued IO has already been prepped
 	 */
@@ -1788,6 +1792,7 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
 	queue_flag_set_unlocked(QUEUE_FLAG_DEFAULT, ns->queue);
 	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+	queue_flag_set_unlocked(QUEUE_FLAG_VIRT_HOLE, ns->queue);
 	queue_flag_clear_unlocked(QUEUE_FLAG_IO_STAT, ns->queue);
 	ns->dev = dev;
 	ns->queue->queuedata = ns;
@@ -1801,7 +1806,6 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
 	lbaf = id->flbas & 0xf;
 	ns->lba_shift = id->lbaf[lbaf].ds;
 	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
-	blk_queue_max_segments(ns->queue, 1);
 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
 	if (dev->max_hw_sectors)
 		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
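
Note for reviewers: irq_set_affinity_hint() only installs the hint (exposed
via /proc/irq/<n>/affinity_hint); it does not change the affinity itself, and
the hint must be cleared by passing NULL before the vector is freed, since
__free_irq() warns if an IRQ is released with a hint still set. A minimal
teardown-side sketch under those assumptions follows; nvme_clear_affinity_hint()
is a hypothetical helper, not part of this patch, and the struct layout is the
one nvme-core.c already uses above:

#include <linux/interrupt.h>	/* irq_set_affinity_hint(), free_irq() */

/*
 * Hypothetical counterpart to the hint set in nvme_init_hctx() (NOT part
 * of this patch): clear the affinity hint before releasing the vector.
 * struct nvme_dev and struct nvme_queue are the driver-private types
 * defined in nvme-core.c.
 */
static void nvme_clear_affinity_hint(struct nvme_dev *dev,
				     struct nvme_queue *nvmeq)
{
	int vector = dev->entry[nvmeq->cq_vector].vector;

	/* Passing NULL removes the hint for this IRQ. */
	irq_set_affinity_hint(vector, NULL);
	free_irq(vector, nvmeq);
}

Something along these lines would slot into the queue-teardown path so that
every hint installed in nvme_init_hctx() is paired with a clearing call.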