diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index a33bd43..8c4031c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -53,6 +53,8 @@ static const int cfq_hist_divisor = 4;
 #define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
 #define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
 
+#define CFQD_FAST(cfqd)		(hweight32(cfqd->slow_device) <= 32/8)
+
 #define RQ_CIC(rq)		\
 	((struct cfq_io_context *) (rq)->elevator_private[0])
 #define RQ_CFQQ(rq)	(struct cfq_queue *) ((rq)->elevator_private[1])
@@ -130,6 +132,8 @@ struct cfq_queue {
 	unsigned long slice_end;
 	long slice_resid;
 
+	unsigned long seeky_dispatch_start;
+
 	/* pending metadata requests */
 	int meta_pending;
 	/* number of requests that are on the dispatch list or inside driver */
@@ -305,6 +309,8 @@ struct cfq_data {
 
 	/* Number of groups which are on blkcg->blkg_list */
 	unsigned int nr_blkcg_linked_grps;
+
+	u32 slow_device;
 };
 
 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
@@ -2073,6 +2079,8 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
 	cfq_remove_request(rq);
+	if (cfqq->dispatched == 0 && CFQQ_SEEKY(cfqq))
+		cfqq->seeky_dispatch_start = jiffies;
 	cfqq->dispatched++;
 	(RQ_CFQG(rq))->dispatched++;
 	elv_dispatch_sort(q, rq);
 
@@ -2375,17 +2383,6 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 		goto keep_queue;
 	}
 
-	/*
-	 * This is a deep seek queue, but the device is much faster than
-	 * the queue can deliver, don't idle
-	 **/
-	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
-	    (cfq_cfqq_slice_new(cfqq) ||
-	    (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
-		cfq_clear_cfqq_deep(cfqq);
-		cfq_clear_cfqq_idle_window(cfqq);
-	}
-
 	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
 		cfqq = NULL;
 		goto keep_queue;
@@ -3298,7 +3295,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
 	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
 
-	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
+	if (cfqq->queued[0] + cfqq->queued[1] >= 4 && !CFQD_FAST(cfqd))
 		cfq_mark_cfqq_deep(cfqq);
 
 	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
@@ -3587,6 +3584,14 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 
 	cfq_update_hw_tag(cfqd);
 
+	if (cfqq->seeky_dispatch_start) {
+		cfqd->slow_device <<= 1;
+		if (jiffies - cfqq->seeky_dispatch_start >=
+				cfqd->cfq_slice_idle / 2)
+			cfqd->slow_device |= 1;
+		cfqq->seeky_dispatch_start = 0;
+	}
+
 	WARN_ON(!cfqd->rq_in_driver);
 	WARN_ON(!cfqq->dispatched);
 	cfqd->rq_in_driver--;
@@ -4089,6 +4094,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_group_idle = cfq_group_idle;
 	cfqd->cfq_latency = 1;
 	cfqd->hw_tag = -1;
+	cfqd->slow_device = -1;
 	/*
 	 * we optimistically start assuming sync ops weren't delayed in last
 	 * second, in order to have larger depth for async operations.
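
Note (not part of the patch itself): the change reuses the same 32-bit shift-register trick that CFQQ_SEEKY already applies to cfqq->seek_history, this time to classify the device. Completions of timed seeky dispatches shift the history left and record a 1 bit when service took at least half the idle slice; the device counts as fast while at most 32/8 of the last 32 samples were slow. The CFQD_FAST comparison above has been flipped to "<=" so that the all-ones initial history (slow_device = -1) means "assume slow", matching the pessimistic init in cfq_init_queue(). Below is a minimal userspace sketch of the heuristic; the struct, helper names, and threshold plumbing are hypothetical, and the kernel's hweight32() is stood in for by a portable popcount.

/*
 * Illustrative userspace model (not kernel code) of the slow_device
 * history added above. Each sampled completion shifts the history
 * left; a 1 bit records a "slow" sample. The device is called fast
 * while at most 32/8 of the last 32 samples were slow, mirroring
 * CFQD_FAST(). All names here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int popcount32(uint32_t w)
{
	unsigned int n;

	for (n = 0; w; w >>= 1)
		n += w & 1;
	return n;
}

struct dev_history {
	uint32_t slow_device;		/* newest sample in bit 0 */
};

/* Record one timed completion against a "slow" threshold (in ticks). */
static void record_sample(struct dev_history *h, unsigned long took,
			  unsigned long slow_thresh)
{
	h->slow_device <<= 1;
	if (took >= slow_thresh)
		h->slow_device |= 1;
}

/* Fast iff at most 4 of the last 32 samples crossed the threshold. */
static int device_is_fast(const struct dev_history *h)
{
	return popcount32(h->slow_device) <= 32 / 8;
}

int main(void)
{
	/* Start pessimistic: all-ones history means "assume slow". */
	struct dev_history h = { .slow_device = UINT32_MAX };
	int i;

	/* Feed 32 fast samples (1 tick each, threshold 4 ticks). */
	for (i = 0; i < 32; i++) {
		record_sample(&h, 1, 4);
		printf("sample %2d: fast=%d\n", i, device_is_fast(&h));
	}
	return 0;
}

The appeal of the scheme is that the verdict costs one popcount per check, and a wrong verdict ages out after at most 32 further samples, so no explicit reset logic is needed.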