diff --git a/block/blk-core.c b/block/blk-core.c
index 7af4a4898dcb..0bc030aff0d0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1307,7 +1307,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 		struct request_list *rl = blk_rq_rl(req);
 
 		BUG_ON(!list_empty(&req->queuelist));
-		BUG_ON(!hlist_unhashed(&req->hash));
+		BUG_ON(ELV_ON_HASH(req));
 
 		blk_free_request(rl, req);
 		freed_request(rl, flags);
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index ebd6b6f1bdeb..53b1737e978d 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -30,8 +30,8 @@ static void blk_done_softirq(struct softirq_action *h)
 	while (!list_empty(&local_list)) {
 		struct request *rq;
 
-		rq = list_entry(local_list.next, struct request, queuelist);
-		list_del_init(&rq->queuelist);
+		rq = list_entry(local_list.next, struct request, ipi_list);
+		list_del_init(&rq->ipi_list);
 		rq->q->softirq_done_fn(rq);
 	}
 }
@@ -45,14 +45,9 @@ static void trigger_softirq(void *data)
 
 	local_irq_save(flags);
 	list = this_cpu_ptr(&blk_cpu_done);
-	/*
-	 * We reuse queuelist for a list of requests to process. Since the
-	 * queuelist is used by the block layer only for requests waiting to be
-	 * submitted to the device it is unused now.
-	 */
-	list_add_tail(&rq->queuelist, list);
+	list_add_tail(&rq->ipi_list, list);
 
-	if (list->next == &rq->queuelist)
+	if (list->next == &rq->ipi_list)
 		raise_softirq_irqoff(BLOCK_SOFTIRQ);
 
 	local_irq_restore(flags);
@@ -141,7 +136,7 @@ void __blk_complete_request(struct request *req)
 		struct list_head *list;
 do_local:
 		list = this_cpu_ptr(&blk_cpu_done);
-		list_add_tail(&req->queuelist, list);
+		list_add_tail(&req->ipi_list, list);
 
 		/*
 		 * if the list only contains our just added request,
@@ -149,7 +144,7 @@ do_local:
 		 * entries there, someone already raised the irq but it
 		 * hasn't run yet.
 		 */
-		if (list->next == &req->queuelist)
+		if (list->next == &req->ipi_list)
 			raise_softirq_irqoff(BLOCK_SOFTIRQ);
 	} else if (raise_blk_irq(ccpu, req))
 		goto do_local;
diff --git a/block/blk.h b/block/blk.h
index d23b415b8a28..1d880f1f957f 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -78,7 +78,7 @@ static inline void blk_clear_rq_complete(struct request *rq)
 /*
  * Internal elevator interface
  */
-#define ELV_ON_HASH(rq) hash_hashed(&(rq)->hash)
+#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)
 
 void blk_insert_flush(struct request *rq);
 void blk_abort_flushes(struct request_queue *q);
diff --git a/block/elevator.c b/block/elevator.c
index 42c45a7d6714..1e01b66a0b92 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -247,6 +247,7 @@ EXPORT_SYMBOL(elevator_exit);
 static inline void __elv_rqhash_del(struct request *rq)
 {
 	hash_del(&rq->hash);
+	rq->cmd_flags &= ~REQ_HASHED;
 }
 
 static void elv_rqhash_del(struct request_queue *q, struct request *rq)
@@ -261,6 +262,7 @@ static void elv_rqhash_add(struct request_queue *q, struct request *rq)
 
 	BUG_ON(ELV_ON_HASH(rq));
 	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
+	rq->cmd_flags |= REQ_HASHED;
 }
 
 static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index bbc3a6c88fce..aa0eaa2d0bd8 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -189,6 +189,7 @@ enum rq_flag_bits {
 	__REQ_KERNEL,		/* direct IO to kernel pages */
 	__REQ_PM,		/* runtime pm request */
 	__REQ_END,		/* last of chain of requests */
+	__REQ_HASHED,		/* on IO scheduler merge hash */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -241,5 +242,6 @@ enum rq_flag_bits {
 #define REQ_KERNEL		(1ULL << __REQ_KERNEL)
 #define REQ_PM			(1ULL << __REQ_PM)
 #define REQ_END			(1ULL << __REQ_END)
+#define REQ_HASHED		(1ULL << __REQ_HASHED)
 
 #endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5a31307c5ded..e402f8421c0c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -118,7 +118,18 @@ struct request {
 	struct bio *bio;
 	struct bio *biotail;
 
-	struct hlist_node hash;	/* merge hash */
+	/*
+	 * The hash is used inside the scheduler, and killed once the
+	 * request reaches the dispatch list. The ipi_list is only used
+	 * to queue the request for softirq completion, which is long
+	 * after the request has been unhashed (and even removed from
+	 * the dispatch list).
+	 */
+	union {
+		struct hlist_node hash;	/* merge hash */
+		struct list_head ipi_list;
+	};
+
 	/*
 	 * The rb_node is only used inside the io scheduler, requests
 	 * are pruned when moved to the dispatch queue. So let the
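Not part of the patch above: the following is a minimal, self-contained userspace sketch (plain C11) of why the hash/ipi_list union goes hand in hand with the new REQ_HASHED flag. Once the hlist_node shares storage with the completion list, inspecting the node itself, as the old hash_hashed()-based ELV_ON_HASH() and the hlist_unhashed() BUG_ON in __blk_put_request() did, can report a stale answer after the bytes have been reused, so a bit in cmd_flags tracks the hashed state instead. All names here (fake_request, FLAG_HASHED, rq_on_hash, ...) are invented for illustration and are not kernel APIs.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define FLAG_HASHED (1u << 0)			/* invented stand-in for REQ_HASHED */

struct fake_node {
	struct fake_node *next;
	struct fake_node *prev;
};

/* Invented stand-in for struct request: hash and ipi_list share storage. */
struct fake_request {
	unsigned int flags;
	union {
		struct fake_node hash;		/* used early, by the I/O scheduler */
		struct fake_node ipi_list;	/* used late, for softirq completion */
	};
};

/* Toy "hash": link the node after head and set the flag (no real hashing). */
static void rq_hash_add(struct fake_request *rq, struct fake_node *head)
{
	rq->hash.next = head->next;
	rq->hash.prev = head;
	head->next = &rq->hash;
	rq->flags |= FLAG_HASHED;
}

/* Unlink again; assumes rq was the most recently added entry. */
static void rq_hash_del(struct fake_request *rq, struct fake_node *head)
{
	head->next = rq->hash.next;
	rq->hash.next = rq->hash.prev = NULL;	/* node now looks "unhashed" */
	rq->flags &= ~FLAG_HASHED;
}

/* Old-style check: inspect the node itself (cf. hash_hashed()/hlist_unhashed()). */
static bool node_linked(const struct fake_node *n)
{
	return n->next != NULL || n->prev != NULL;
}

/* New-style check: a flag that survives reuse of the union (cf. ELV_ON_HASH()). */
static bool rq_on_hash(const struct fake_request *rq)
{
	return rq->flags & FLAG_HASHED;
}

int main(void)
{
	struct fake_node merge_hash = { NULL, NULL };
	struct fake_node done_list = { NULL, NULL };
	struct fake_request rq = { 0 };

	rq_hash_add(&rq, &merge_hash);
	rq_hash_del(&rq, &merge_hash);

	/* The completion path reuses the same bytes for ipi_list... */
	rq.ipi_list.next = done_list.next;
	rq.ipi_list.prev = &done_list;
	done_list.next = &rq.ipi_list;

	/* ...so inspecting the node now gives a false positive; the flag does not. */
	printf("node_linked(&rq.hash) = %d (misleading after reuse)\n", node_linked(&rq.hash));
	printf("rq_on_hash(&rq)       = %d (correct)\n", rq_on_hash(&rq));
	return 0;
}

The same idea is what lets the queuelist borrowing in blk-softirq.c go away: the softirq completion list now gets its own (hash-sharing) ipi_list member instead of reusing queuelist.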