Message-ID: <20091113161506.GF17076@redhat.com>
Date: Fri, 13 Nov 2009 11:15:06 -0500
From: Vivek Goyal <vgoyal@...hat.com>
To: Corrado Zoccolo <czoccolo@...il.com>
Cc: linux-kernel@...r.kernel.org, jens.axboe@...cle.com,
nauman@...gle.com, dpshah@...gle.com, lizf@...fujitsu.com,
ryov@...inux.co.jp, fernando@....ntt.co.jp, s-uchida@...jp.nec.com,
taka@...inux.co.jp, guijianfeng@...fujitsu.com, jmoyer@...hat.com,
balbir@...ux.vnet.ibm.com, righi.andrea@...il.com,
m-ikeda@...jp.nec.com, akpm@...ux-foundation.org, riel@...hat.com,
kamezawa.hiroyu@...fujitsu.com
Subject: Re: [PATCH 05/16] blkio: Implement per cfq group latency target and busy queue avg

On Fri, Nov 13, 2009 at 10:18:15AM -0500, Vivek Goyal wrote:
> On Fri, Nov 13, 2009 at 11:46:49AM +0100, Corrado Zoccolo wrote:
> > On Fri, Nov 13, 2009 at 12:32 AM, Vivek Goyal <vgoyal@...hat.com> wrote:
> > >  static inline void
> > > @@ -441,10 +445,13 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
> > >  	if (cfqd->cfq_latency) {
> > >  		/* interested queues (we consider only the ones with the same
> > >  		 * priority class) */
> > This comment needs to be updated
>
> Sure. Will do. Now the interested queues are the ones with the same
> priority class within the group.
>
> > >  		 * priority class) */
> > > -		unsigned iq = cfq_get_avg_queues(cfqd, cfq_class_rt(cfqq));
> > > +		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
> > > +						cfq_class_rt(cfqq));
> > >  		unsigned sync_slice = cfqd->cfq_slice[1];
> > >  		unsigned expect_latency = sync_slice * iq;
> > > -		if (expect_latency > cfq_target_latency) {
> > > +		unsigned group_target_lat = cfq_target_latency/cfqd->nr_groups;
> >
> > I'm not sure that we should divide the target latency evenly among groups.
> > Groups with different weights will get different percentages of time
> > in each 300ms round, so we should probably take that into account here.
> >
>
> Taking group weight into account would be more precise. So maybe I can
> keep track of the total weight on the service tree and determine the
> group target latency as a proportion of that total weight.
>
> group_target_lat = group_weight * cfq_target_latency/total_weight_of_groups
>
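
To make the arithmetic concrete, here is a minimal userspace sketch of that
proportional split (the group weights below are made-up example values, not
taken from the patch; cfq_target_latency defaults to 300ms):

#include <stdio.h>

/* default CFQ target latency, in ms */
#define CFQ_TARGET_LATENCY	300

/*
 * Weight-proportional share of the target latency; same arithmetic as the
 * cfq_group_latency() helper introduced in the patch below.
 */
static unsigned group_target_latency(unsigned weight, unsigned total_weight)
{
	return CFQ_TARGET_LATENCY * weight / total_weight;
}

int main(void)
{
	/* hypothetical groups with weights 100, 200 and 700 (total 1000) */
	unsigned weights[] = { 100, 200, 700 };
	unsigned total = 1000;
	unsigned i;

	for (i = 0; i < 3; i++)
		printf("weight %4u -> %3u ms\n", weights[i],
		       group_target_latency(weights[i], total));

	/* prints 30 ms, 60 ms and 210 ms, summing to the 300 ms target */
	return 0;
}
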
Here is the patch I generated on top of all the patches in the series.

o Determine group target latency in proportion to group weight instead of
  just the number of groups.

Signed-off-by: Vivek Goyal <vgoyal@...hat.com>
---
block/cfq-iosched.c | 21 +++++++++++++++++----
1 file changed, 17 insertions(+), 4 deletions(-)
Index: linux7/block/cfq-iosched.c
===================================================================
--- linux7.orig/block/cfq-iosched.c 2009-11-13 09:48:38.000000000 -0500
+++ linux7/block/cfq-iosched.c 2009-11-13 11:06:22.000000000 -0500
@@ -81,6 +81,7 @@ struct cfq_rb_root {
 	unsigned count;
 	u64 min_vdisktime;
 	struct rb_node *active;
+	unsigned total_weight;
 };
 #define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, 0, 0, }
 
@@ -521,18 +522,28 @@ static inline unsigned cfq_group_get_avg
 	return cfqg->busy_queues_avg[rt];
 }
 
+static inline unsigned
+cfq_group_latency(struct cfq_data *cfqd, struct cfq_group *cfqg)
+{
+	struct cfq_rb_root *st = &cfqd->grp_service_tree;
+
+	return cfq_target_latency * cfqg->weight / st->total_weight;
+}
+
 static inline void
 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
 	if (cfqd->cfq_latency) {
-		/* interested queues (we consider only the ones with the same
-		 * priority class) */
+		/*
+		 * interested queues (we consider only the ones with the same
+		 * priority class in the cfq group)
+		 */
 		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
 						cfq_class_rt(cfqq));
 		unsigned sync_slice = cfqd->cfq_slice[1];
 		unsigned expect_latency = sync_slice * iq;
-		unsigned group_target_lat = cfq_target_latency/cfqd->nr_groups;
+		unsigned group_target_lat = cfq_group_latency(cfqd, cfqq->cfqg);
 
 		if (expect_latency > group_target_lat) {
 			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
@@ -799,6 +810,7 @@ cfq_group_service_tree_add(struct cfq_da
 	__cfq_group_service_tree_add(st, cfqg);
 	cfqg->on_st = true;
 	cfqd->nr_groups++;
+	st->total_weight += cfqg->weight;
 }
 
 static void
@@ -819,6 +831,7 @@ cfq_group_service_tree_del(struct cfq_da
 	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
 	cfqg->on_st = false;
 	cfqd->nr_groups--;
+	st->total_weight -= cfqg->weight;
 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		cfq_rb_erase(&cfqg->rb_node, st);
 	cfqg->saved_workload_slice = 0;
@@ -2033,7 +2046,7 @@ static void choose_service_tree(struct c
 	 * proportional to the number of queues in that workload, over
 	 * all the queues in the same priority class
 	 */
-	group_target_latency = cfq_target_latency/cfqd->nr_groups;
+	group_target_latency = cfq_group_latency(cfqd, cfqg);
 
 	slice = group_target_latency * count /
 		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
--