From 0fd04112850a73f5be9fa91a29bd1791179e1e80 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Tue, 20 Dec 2016 12:53:54 +0100
Subject: [PATCH 1/3] blk-mq: Revert some of the blk-mq-sched framework changes

Remove the functions that allocate and free shadow requests. Remove the
get_request, put_request and completed_request callback functions from
struct elevator_type. Remove blk-mq I/O scheduling functions that become
superfluous due to these changes.

Note: this patch breaks blk-mq I/O scheduling. Later patches will make
blk-mq I/O scheduling work again.
---
 block/blk-mq-sched.c     | 295 +---------------------------------------------
 block/blk-mq-sched.h     |  55 +--------
 block/blk-mq.c           |  50 ++++++--
 block/mq-deadline.c      |  90 ++-------------
 include/linux/elevator.h |   3 -
 5 files changed, 58 insertions(+), 435 deletions(-)

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 265e4a9cce7e..e46769db3d57 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -15,196 +15,6 @@
 #include "blk-mq-tag.h"
 #include "blk-wbt.h"
 
-/*
- * Empty set
- */
-static const struct blk_mq_ops mq_sched_tag_ops = {
-};
-
-void blk_mq_sched_free_requests(struct blk_mq_tags *tags)
-{
-	blk_mq_free_rq_map(NULL, tags, 0);
-}
-EXPORT_SYMBOL_GPL(blk_mq_sched_free_requests);
-
-struct blk_mq_tags *blk_mq_sched_alloc_requests(unsigned int depth,
-						unsigned int numa_node)
-{
-	struct blk_mq_tag_set set = {
-		.ops		= &mq_sched_tag_ops,
-		.nr_hw_queues	= 1,
-		.queue_depth	= depth,
-		.numa_node	= numa_node,
-	};
-
-	return blk_mq_init_rq_map(&set, 0);
-}
-EXPORT_SYMBOL_GPL(blk_mq_sched_alloc_requests);
-
-void blk_mq_sched_free_hctx_data(struct request_queue *q,
-				 void (*exit)(struct blk_mq_hw_ctx *))
-{
-	struct blk_mq_hw_ctx *hctx;
-	int i;
-
-	queue_for_each_hw_ctx(q, hctx, i) {
-		if (exit)
-			exit(hctx);
-		kfree(hctx->sched_data);
-		hctx->sched_data = NULL;
-	}
-}
-EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);
-
-int blk_mq_sched_init_hctx_data(struct request_queue *q, size_t size,
-				int (*init)(struct blk_mq_hw_ctx *),
-				void (*exit)(struct blk_mq_hw_ctx *))
-{
-	struct blk_mq_hw_ctx *hctx;
-	int ret;
-	int i;
-
-	queue_for_each_hw_ctx(q, hctx, i) {
-		hctx->sched_data = kmalloc_node(size, GFP_KERNEL, hctx->numa_node);
-		if (!hctx->sched_data) {
-			ret = -ENOMEM;
-			goto error;
-		}
-
-		if (init) {
-			ret = init(hctx);
-			if (ret) {
-				/*
-				 * We don't want to give exit() a partially
-				 * initialized sched_data. init() must clean up
-				 * if it fails.
-				 */
-				kfree(hctx->sched_data);
-				hctx->sched_data = NULL;
-				goto error;
-			}
-		}
-	}
-
-	return 0;
-error:
-	blk_mq_sched_free_hctx_data(q, exit);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(blk_mq_sched_init_hctx_data);
-
-struct request *blk_mq_sched_alloc_shadow_request(struct request_queue *q,
-						  struct blk_mq_alloc_data *data,
-						  struct blk_mq_tags *tags,
-						  atomic_t *wait_index)
-{
-	struct sbq_wait_state *ws;
-	DEFINE_WAIT(wait);
-	struct request *rq;
-	int tag;
-
-	tag = __sbitmap_queue_get(&tags->bitmap_tags);
-	if (tag != -1)
-		goto done;
-
-	if (data->flags & BLK_MQ_REQ_NOWAIT)
-		return NULL;
-
-	ws = sbq_wait_ptr(&tags->bitmap_tags, wait_index);
-	do {
-		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
-
-		tag = __sbitmap_queue_get(&tags->bitmap_tags);
-		if (tag != -1)
-			break;
-
-		blk_mq_run_hw_queue(data->hctx, false);
-
-		tag = __sbitmap_queue_get(&tags->bitmap_tags);
-		if (tag != -1)
-			break;
-
-		blk_mq_put_ctx(data->ctx);
-		io_schedule();
-
-		data->ctx = blk_mq_get_ctx(data->q);
-		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
-		finish_wait(&ws->wait, &wait);
-		ws = sbq_wait_ptr(&tags->bitmap_tags, wait_index);
-	} while (1);
-
-	finish_wait(&ws->wait, &wait);
-done:
-	rq = tags->rqs[tag];
-	rq->tag = tag;
-	rq->rq_flags = RQF_ALLOCED;
-	return rq;
-}
-EXPORT_SYMBOL_GPL(blk_mq_sched_alloc_shadow_request);
-
-void blk_mq_sched_free_shadow_request(struct blk_mq_tags *tags,
-				      struct request *rq)
-{
-	WARN_ON_ONCE(!(rq->rq_flags & RQF_ALLOCED));
-	sbitmap_queue_clear(&tags->bitmap_tags, rq->tag, rq->mq_ctx->cpu);
-}
-EXPORT_SYMBOL_GPL(blk_mq_sched_free_shadow_request);
-
-static void rq_copy(struct request *rq, struct request *src)
-{
-#define FIELD_COPY(dst, src, name)	((dst)->name = (src)->name)
-	FIELD_COPY(rq, src, cpu);
-	FIELD_COPY(rq, src, cmd_type);
-	FIELD_COPY(rq, src, cmd_flags);
-	rq->rq_flags |= (src->rq_flags & (RQF_PREEMPT | RQF_QUIET | RQF_PM | RQF_DONTPREP));
-	rq->rq_flags &= ~RQF_IO_STAT;
-	FIELD_COPY(rq, src, __data_len);
-	FIELD_COPY(rq, src, __sector);
-	FIELD_COPY(rq, src, bio);
-	FIELD_COPY(rq, src, biotail);
-	FIELD_COPY(rq, src, rq_disk);
-	FIELD_COPY(rq, src, part);
-	FIELD_COPY(rq, src, issue_stat);
-	src->issue_stat.time = 0;
-	FIELD_COPY(rq, src, nr_phys_segments);
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-	FIELD_COPY(rq, src, nr_integrity_segments);
-#endif
-	FIELD_COPY(rq, src, ioprio);
-	FIELD_COPY(rq, src, timeout);
-
-	if (src->cmd_type == REQ_TYPE_BLOCK_PC) {
-		FIELD_COPY(rq, src, cmd);
-		FIELD_COPY(rq, src, cmd_len);
-		FIELD_COPY(rq, src, extra_len);
-		FIELD_COPY(rq, src, sense_len);
-		FIELD_COPY(rq, src, resid_len);
-		FIELD_COPY(rq, src, sense);
-		FIELD_COPY(rq, src, retries);
-	}
-
-	src->bio = src->biotail = NULL;
-}
-
-static void sched_rq_end_io(struct request *rq, int error)
-{
-	struct request *sched_rq = rq->end_io_data;
-
-	FIELD_COPY(sched_rq, rq, resid_len);
-	FIELD_COPY(sched_rq, rq, extra_len);
-	FIELD_COPY(sched_rq, rq, sense_len);
-	FIELD_COPY(sched_rq, rq, errors);
-	FIELD_COPY(sched_rq, rq, retries);
-
-	blk_account_io_completion(sched_rq, blk_rq_bytes(sched_rq));
-	blk_account_io_done(sched_rq);
-
-	if (sched_rq->end_io)
-		sched_rq->end_io(sched_rq, error);
-
-	blk_mq_finish_request(rq);
-}
-
 static inline struct request *
 __blk_mq_sched_alloc_request(struct blk_mq_hw_ctx *hctx)
 {
@@ -225,55 +35,6 @@ __blk_mq_sched_alloc_request(struct blk_mq_hw_ctx *hctx)
 	return rq;
 }
 
-static inline void
-__blk_mq_sched_init_request_from_shadow(struct request *rq,
-					struct request *sched_rq)
-{
-	WARN_ON_ONCE(!(sched_rq->rq_flags & RQF_ALLOCED));
-	rq_copy(rq, sched_rq);
-	rq->end_io = sched_rq_end_io;
-	rq->end_io_data = sched_rq;
-}
-
-struct request *
-blk_mq_sched_request_from_shadow(struct blk_mq_hw_ctx *hctx,
-				 struct request *(*get_sched_rq)(struct blk_mq_hw_ctx *))
-{
-	struct request *rq, *sched_rq;
-
-	rq = __blk_mq_sched_alloc_request(hctx);
-	if (!rq)
-		return NULL;
-
-	sched_rq = get_sched_rq(hctx);
-	if (sched_rq) {
-		__blk_mq_sched_init_request_from_shadow(rq, sched_rq);
-		return rq;
-	}
-
-	/*
-	 * __blk_mq_finish_request() drops a queue ref we already hold,
-	 * so grab an extra one.
-	 */
-	blk_queue_enter_live(hctx->queue);
-	__blk_mq_finish_request(hctx, rq->mq_ctx, rq);
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(blk_mq_sched_request_from_shadow);
-
-struct request *__blk_mq_sched_request_from_shadow(struct blk_mq_hw_ctx *hctx,
-						   struct request *sched_rq)
-{
-	struct request *rq;
-
-	rq = __blk_mq_sched_alloc_request(hctx);
-	if (rq)
-		__blk_mq_sched_init_request_from_shadow(rq, sched_rq);
-
-	return rq;
-}
-EXPORT_SYMBOL_GPL(__blk_mq_sched_request_from_shadow);
-
 static void __blk_mq_sched_assign_ioc(struct request_queue *q,
 				      struct request *rq, struct io_context *ioc)
 {
@@ -298,8 +59,8 @@ static void __blk_mq_sched_assign_ioc(struct request_queue *q,
 	rq->elv.icq = NULL;
 }
 
-static void blk_mq_sched_assign_ioc(struct request_queue *q,
-				    struct request *rq, struct bio *bio)
+void blk_mq_sched_assign_ioc(struct request_queue *q, struct request *rq,
+			     struct bio *bio)
 {
 	struct io_context *ioc;
 
@@ -308,44 +69,9 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q,
 		__blk_mq_sched_assign_ioc(q, rq, ioc);
 }
 
-struct request *blk_mq_sched_get_request(struct request_queue *q,
-					 struct bio *bio,
-					 unsigned int op,
-					 struct blk_mq_alloc_data *data)
-{
-	struct elevator_queue *e = q->elevator;
-	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx;
-	struct request *rq;
-
-	blk_queue_enter_live(q);
-	ctx = blk_mq_get_ctx(q);
-	hctx = blk_mq_map_queue(q, ctx->cpu);
-
-	blk_mq_set_alloc_data(data, q, 0, ctx, hctx);
-
-	if (e && e->type->ops.mq.get_request)
-		rq = e->type->ops.mq.get_request(q, op, data);
-	else
-		rq = __blk_mq_alloc_request(data, op);
-
-	if (rq) {
-		rq->elv.icq = NULL;
-		if (e && e->type->icq_cache)
-			blk_mq_sched_assign_ioc(q, rq, bio);
-		data->hctx->queued++;
-		return rq;
-	}
-
-	blk_queue_exit(q);
-	return NULL;
-}
-
 void blk_mq_sched_put_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
-	struct elevator_queue *e = q->elevator;
-	bool has_queue_ref = false, do_free = false;
 
 	wbt_done(q->rq_wb, &rq->issue_stat);
 
@@ -357,22 +83,7 @@ void blk_mq_sched_put_request(struct request *rq)
 		}
 	}
 
-	/*
-	 * If we are freeing a shadow that hasn't been started, then drop
-	 * our queue ref on it. This normally happens at IO completion
-	 * time, but if we merge request-to-request, then this 'rq' will
-	 * never get started or completed.
-	 */
-	if (blk_mq_sched_rq_is_shadow(rq) && !(rq->rq_flags & RQF_STARTED))
-		has_queue_ref = true;
-
-	if (e && e->type->ops.mq.put_request)
-		do_free = !e->type->ops.mq.put_request(rq);
-
-	if (do_free)
-		blk_mq_finish_request(rq);
-	if (has_queue_ref)
-		blk_queue_exit(q);
+	blk_mq_finish_request(rq);
 }
 
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 8ff37f9782e9..6b8c314b5c20 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -3,30 +3,6 @@
 
 #include "blk-mq.h"
 
-struct blk_mq_tags *blk_mq_sched_alloc_requests(unsigned int depth, unsigned int numa_node);
-void blk_mq_sched_free_requests(struct blk_mq_tags *tags);
-
-int blk_mq_sched_init_hctx_data(struct request_queue *q, size_t size,
-				int (*init)(struct blk_mq_hw_ctx *),
-				void (*exit)(struct blk_mq_hw_ctx *));
-
-void blk_mq_sched_free_hctx_data(struct request_queue *q,
-				 void (*exit)(struct blk_mq_hw_ctx *));
-
-void blk_mq_sched_free_shadow_request(struct blk_mq_tags *tags,
-				      struct request *rq);
-struct request *blk_mq_sched_alloc_shadow_request(struct request_queue *q,
-						  struct blk_mq_alloc_data *data,
-						  struct blk_mq_tags *tags,
-						  atomic_t *wait_index);
-struct request *
-blk_mq_sched_request_from_shadow(struct blk_mq_hw_ctx *hctx,
-				 struct request *(*get_sched_rq)(struct blk_mq_hw_ctx *));
-struct request *
-__blk_mq_sched_request_from_shadow(struct blk_mq_hw_ctx *hctx,
-				   struct request *sched_rq);
-
-struct request *blk_mq_sched_get_request(struct request_queue *q, struct bio *bio, unsigned int op, struct blk_mq_alloc_data *data);
 void blk_mq_sched_put_request(struct request *rq);
 
 void __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
@@ -35,6 +11,9 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio);
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
 
+void blk_mq_sched_assign_ioc(struct request_queue *q,
+			     struct request *rq, struct bio *bio);
+
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
 
 int blk_mq_sched_init(struct request_queue *q);
@@ -109,22 +88,6 @@ blk_mq_sched_insert_requests(struct request_queue *q, struct blk_mq_ctx *ctx,
 	blk_mq_run_hw_queue(hctx, run_queue_async);
 }
 
-static inline void
-blk_mq_sched_dispatch_shadow_requests(struct blk_mq_hw_ctx *hctx,
-				      struct list_head *rq_list,
-				      struct request *(*get_sched_rq)(struct blk_mq_hw_ctx *))
-{
-	do {
-		struct request *rq;
-
-		rq = blk_mq_sched_request_from_shadow(hctx, get_sched_rq);
-		if (!rq)
-			break;
-
-		list_add_tail(&rq->queuelist, rq_list);
-	} while (1);
-}
-
 static inline bool
 blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
 			 struct bio *bio)
@@ -140,11 +103,6 @@ blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
 static inline void
 blk_mq_sched_completed_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
 {
-	struct elevator_queue *e = hctx->queue->elevator;
-
-	if (e && e->type->ops.mq.completed_request)
-		e->type->ops.mq.completed_request(hctx, rq);
-
 	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
 		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
 		blk_mq_run_hw_queue(hctx, true);
@@ -179,11 +137,4 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
 	return false;
 }
 
-/*
- * Returns true if this is an internal shadow request
- */
-static inline bool blk_mq_sched_rq_is_shadow(struct request *rq)
-{
-	return (rq->rq_flags & RQF_ALLOCED) != 0;
-}
 #endif
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3a19834211b2..35e1162602f5 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -245,6 +245,8 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 		unsigned int flags)
 {
 	struct blk_mq_alloc_data alloc_data;
+	struct blk_mq_ctx *ctx;
+	struct blk_mq_hw_ctx *hctx;
 	struct request *rq;
 	int ret;
 
@@ -252,13 +254,16 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 	if (ret)
 		return ERR_PTR(ret);
 
-	rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
-
-	blk_mq_put_ctx(alloc_data.ctx);
-	blk_queue_exit(q);
+	ctx = blk_mq_get_ctx(q);
+	hctx = blk_mq_map_queue(q, ctx->cpu);
+	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
+	rq = __blk_mq_alloc_request(&alloc_data, rw);
+	blk_mq_put_ctx(ctx);
 
-	if (!rq)
+	if (!rq) {
+		blk_queue_exit(q);
 		return ERR_PTR(-EWOULDBLOCK);
+	}
 
 	rq->__data_len = 0;
 	rq->__sector = (sector_t) -1;
@@ -324,7 +329,7 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 	const int tag = rq->tag;
 	struct request_queue *q = rq->q;
 
-	blk_mq_sched_completed_request(hctx, rq);
+	ctx->rq_completed[rq_is_sync(rq)]++;
 
 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
 		atomic_dec(&hctx->nr_active);
@@ -1246,6 +1251,34 @@ static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
 	}
 }
 
+static struct request *blk_mq_get_request(struct request_queue *q,
+					  struct bio *bio,
+					  struct blk_mq_alloc_data *data)
+{
+	struct elevator_queue *e = q->elevator;
+	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_ctx *ctx;
+	struct request *rq;
+
+	blk_queue_enter_live(q);
+	ctx = blk_mq_get_ctx(q);
+	hctx = blk_mq_map_queue(q, ctx->cpu);
+
+	trace_block_getrq(q, bio, bio->bi_opf);
+	blk_mq_set_alloc_data(data, q, 0, ctx, hctx);
+	rq = __blk_mq_alloc_request(data, bio->bi_opf);
+
+	if (rq) {
+		rq->elv.icq = NULL;
+		if (e && e->type->icq_cache)
+			blk_mq_sched_assign_ioc(q, rq, bio);
+		data->hctx->queued++;
+		return rq;
+	}
+
+	return rq;
+}
+
 static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
 {
 	struct request_queue *q = rq->q;
@@ -1328,7 +1361,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	trace_block_getrq(q, bio, bio->bi_opf);
 
-	rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
+	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
 		__wbt_done(q->rq_wb, wb_acct);
 		return BLK_QC_T_NONE;
@@ -1448,7 +1481,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 
 	trace_block_getrq(q, bio, bio->bi_opf);
 
-	rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
+	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
 		__wbt_done(q->rq_wb, wb_acct);
 		return BLK_QC_T_NONE;
@@ -1504,6 +1537,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 		blk_mq_sched_insert_request(rq, false, true, true);
 		goto done;
 	}
+
 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
 		/*
 		 * For a SYNC request, send it to the hardware immediately. For
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index e26c02798041..9a4039d9b4f0 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -63,8 +63,6 @@ struct deadline_data {
 
 	spinlock_t lock;
 	struct list_head dispatch;
-	struct blk_mq_tags *tags;
-	atomic_t wait_index;
 };
 
 static inline struct rb_root *
@@ -300,7 +298,13 @@ static struct request *__dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 static void dd_dispatch_requests(struct blk_mq_hw_ctx *hctx,
				 struct list_head *rq_list)
 {
-	blk_mq_sched_dispatch_shadow_requests(hctx, rq_list, __dd_dispatch_request);
+	for (;;) {
+		struct request *rq = __dd_dispatch_request(hctx);
+		if (!rq)
+			break;
+
+		list_add_tail(&rq->queuelist, rq_list);
+	}
 }
 
 static void dd_exit_queue(struct elevator_queue *e)
@@ -310,7 +314,6 @@ static void dd_exit_queue(struct elevator_queue *e)
 
 	BUG_ON(!list_empty(&dd->fifo_list[READ]));
 	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
 
-	blk_mq_sched_free_requests(dd->tags);
 	kfree(dd);
 }
@@ -333,13 +336,6 @@ static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
 	}
 	eq->elevator_data = dd;
 
-	dd->tags = blk_mq_sched_alloc_requests(queue_depth, q->node);
-	if (!dd->tags) {
-		kfree(dd);
-		kobject_put(&eq->kobj);
-		return -ENOMEM;
-	}
-
 	INIT_LIST_HEAD(&dd->fifo_list[READ]);
 	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
 	dd->sort_list[READ] = RB_ROOT;
@@ -351,7 +347,6 @@ static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
 	dd->fifo_batch = fifo_batch;
 	spin_lock_init(&dd->lock);
 	INIT_LIST_HEAD(&dd->dispatch);
-	atomic_set(&dd->wait_index, 0);
 
 	q->elevator = eq;
 	return 0;
@@ -409,11 +404,10 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	blk_mq_sched_request_inserted(rq);
 
 	/*
-	 * If we're trying to insert a real request, just send it directly
-	 * to the hardware dispatch list. This only happens for a requeue,
-	 * or FUA/FLUSH requests.
+	 * Send FUA and FLUSH requests directly to the hardware dispatch list.
+	 * To do: also send requeued requests directly to the hw disp list.
 	 */
-	if (!blk_mq_sched_rq_is_shadow(rq)) {
+	if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA)) {
 		spin_lock(&hctx->lock);
 		list_add_tail(&rq->queuelist, &hctx->dispatch);
 		spin_unlock(&hctx->lock);
@@ -459,67 +453,6 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
 	spin_unlock(&dd->lock);
 }
 
-static struct request *dd_get_request(struct request_queue *q, unsigned int op,
-				      struct blk_mq_alloc_data *data)
-{
-	struct deadline_data *dd = q->elevator->elevator_data;
-	struct request *rq;
-
-	/*
-	 * The flush machinery intercepts before we insert the request. As
-	 * a work-around, just hand it back a real request.
-	 */
-	if (unlikely(op & (REQ_PREFLUSH | REQ_FUA)))
-		rq = __blk_mq_alloc_request(data, op);
-	else {
-		rq = blk_mq_sched_alloc_shadow_request(q, data, dd->tags, &dd->wait_index);
-		if (rq)
-			blk_mq_rq_ctx_init(q, data->ctx, rq, op);
-	}
-
-	return rq;
-}
-
-static bool dd_put_request(struct request *rq)
-{
-	/*
-	 * If it's a real request, we just have to free it. Return false
-	 * to say we didn't handle it, and blk_mq_sched will take care of that.
-	 */
-	if (!blk_mq_sched_rq_is_shadow(rq))
-		return false;
-
-	if (!(rq->rq_flags & RQF_STARTED)) {
-		struct request_queue *q = rq->q;
-		struct deadline_data *dd = q->elevator->elevator_data;
-
-		/*
-		 * IO completion would normally do this, but if we merge
-		 * and free before we issue the request, we need to free
-		 * the shadow tag here.
-		 */
-		blk_mq_sched_free_shadow_request(dd->tags, rq);
-	}
-
-	return true;
-}
-
-static void dd_completed_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
-{
-	struct request *sched_rq = rq->end_io_data;
-
-	/*
-	 * sched_rq can be NULL, if we haven't setup the shadow yet
-	 * because we failed getting one.
-	 */
-	if (sched_rq) {
-		struct deadline_data *dd = hctx->queue->elevator->elevator_data;
-
-		blk_mq_sched_free_shadow_request(dd->tags, sched_rq);
-		blk_mq_start_stopped_hw_queue(hctx, true);
-	}
-}
-
 static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
 {
 	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
@@ -601,11 +534,8 @@ static struct elv_fs_entry deadline_attrs[] = {
 
 static struct elevator_type mq_deadline = {
 	.ops.mq = {
-		.get_request		= dd_get_request,
-		.put_request		= dd_put_request,
 		.insert_requests	= dd_insert_requests,
 		.dispatch_requests	= dd_dispatch_requests,
-		.completed_request	= dd_completed_request,
 		.next_request		= elv_rb_latter_request,
 		.former_request		= elv_rb_former_request,
 		.bio_merge		= dd_bio_merge,
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 64224d39d707..312e6d3e89fa 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -89,12 +89,9 @@ struct elevator_mq_ops {
 	int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
 	void (*request_merged)(struct request_queue *, struct request *, int);
 	void (*requests_merged)(struct request_queue *, struct request *, struct request *);
-	struct request *(*get_request)(struct request_queue *, unsigned int, struct blk_mq_alloc_data *);
-	bool (*put_request)(struct request *);
 	void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
 	void (*dispatch_requests)(struct blk_mq_hw_ctx *, struct list_head *);
 	bool (*has_work)(struct blk_mq_hw_ctx *);
-	void (*completed_request)(struct blk_mq_hw_ctx *, struct request *);
 	void (*started_request)(struct request *);
 	void (*requeue_request)(struct request *);
 	struct request *(*former_request)(struct request_queue *, struct request *);
-- 
2.11.0
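
P.S. For readers following the mq-deadline hunk: the rewritten dd_dispatch_requests()
above is just a drain loop that keeps calling __dd_dispatch_request() and appends each
returned request to the caller's list until the scheduler runs dry. Below is a minimal
user-space sketch of that same pattern, illustration only and not part of the patch;
every name in it is a hypothetical stand-in rather than a kernel API.

	/*
	 * Illustration of the drain-until-NULL dispatch pattern used in
	 * dd_dispatch_requests() above.  All names are hypothetical
	 * stand-ins, not kernel APIs.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct request {
		int tag;
		struct request *next;
	};

	/* Stand-in for __dd_dispatch_request(): returns NULL once drained. */
	static struct request *pull_next_request(void)
	{
		static int remaining = 3;
		struct request *rq;

		if (remaining == 0)
			return NULL;
		rq = malloc(sizeof(*rq));
		if (!rq)
			return NULL;
		rq->tag = remaining--;
		rq->next = NULL;
		return rq;
	}

	/* Stand-in for dd_dispatch_requests(): drain everything into *rq_list. */
	static void dispatch_requests(struct request **rq_list)
	{
		struct request **tail = rq_list;

		for (;;) {
			struct request *rq = pull_next_request();

			if (!rq)
				break;
			*tail = rq;	/* append at the tail, like list_add_tail() */
			tail = &rq->next;
		}
	}

	int main(void)
	{
		struct request *list = NULL, *rq, *next;

		dispatch_requests(&list);
		for (rq = list; rq; rq = next) {
			next = rq->next;
			printf("dispatched request, tag %d\n", rq->tag);
			free(rq);
		}
		return 0;
	}

The sketch compiles with any C compiler; the point is only that dispatch no longer goes
through shadow requests -- the elevator hands back its own requests directly.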