Remove the correlation between max_sectors_kb and read_ahead_kb.
It's unnecessary to reduce readahead size when setting max_sectors_kb.

Signed-off-by: Wu Fengguang
Signed-off-by: Andrew Morton
---

--- linux-2.6.19-rc5-mm2.orig/block/ll_rw_blk.c
+++ linux-2.6.19-rc5-mm2/block/ll_rw_blk.c
@@ -3827,25 +3827,11 @@ queue_max_sectors_store(struct request_q
 			max_hw_sectors_kb = q->max_hw_sectors >> 1,
 			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
 	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
-	int ra_kb;
 
 	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
 		return -EINVAL;
-	/*
-	 * Take the queue lock to update the readahead and max_sectors
-	 * values synchronously:
-	 */
-	spin_lock_irq(q->queue_lock);
-	/*
-	 * Trim readahead window as well, if necessary:
-	 */
-	ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
-	if (ra_kb > max_sectors_kb)
-		q->backing_dev_info.ra_pages =
-			max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);
 
 	q->max_sectors = max_sectors_kb << 1;
-	spin_unlock_irq(q->queue_lock);
 
 	return ret;
 }
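
For reference, this is roughly how queue_max_sectors_store() reads once the
hunk above is applied. It is a sketch reconstructed from the hunk's context
lines: the function signature and the opening declaration of max_sectors_kb
are inferred from the truncated hunk header
("queue_max_sectors_store(struct request_q"), not quoted from the tree.

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page,
			size_t count)
{
	/*
	 * The max_sectors_kb declaration is inferred; the two
	 * initializers below are context lines from the hunk.
	 */
	unsigned long max_sectors_kb,
			max_hw_sectors_kb = q->max_hw_sectors >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	/* Reject values above the hardware limit or below one page. */
	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	/*
	 * Only max_sectors is updated now (<< 1 converts KB to 512-byte
	 * sectors). The readahead window, q->backing_dev_info.ra_pages,
	 * is left alone, and with it goes the queue-lock round-trip that
	 * only existed to update the two values together.
	 */
	q->max_sectors = max_sectors_kb << 1;

	return ret;
}

With the trimming gone, read_ahead_kb can stay larger than max_sectors_kb;
the block layer splits larger readahead I/O into max_sectors-sized requests
anyway, so the two tunables no longer need to be coupled.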