[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20090825094914.GR3663@in.ibm.com>
Date: Tue, 25 Aug 2009 15:19:14 +0530
From: Bharata B Rao <bharata@...ux.vnet.ibm.com>
To: linux-kernel@...r.kernel.org
Cc: Dhaval Giani <dhaval@...ux.vnet.ibm.com>,
Balbir Singh <balbir@...ux.vnet.ibm.com>,
Vaidyanathan Srinivasan <svaidy@...ux.vnet.ibm.com>,
Gautham R Shenoy <ego@...ibm.com>,
Srivatsa Vaddagiri <vatsa@...ibm.com>,
Ingo Molnar <mingo@...e.hu>,
Peter Zijlstra <a.p.zijlstra@...llo.nl>,
Pavel Emelyanov <xemul@...nvz.org>,
Herbert Poetzl <herbert@...hfloor.at>,
Avi Kivity <avi@...hat.com>,
Chris Friesen <cfriesen@...tel.com>,
Paul Menage <menage@...gle.com>,
Mike Waychison <mikew@...gle.com>
Subject: [RFC v1 PATCH 2/7] sched: Maintain aggregated tasks count in
cfs_rq at each hierarchy level
sched: Maintain aggregated tasks count in cfs_rq at each hierarchy level
From: Bharata B Rao <bharata@...ux.vnet.ibm.com>
This patch adds a counter to cfs_rq (->nr_tasks_running) to record the
aggregated tasks count at each level in the task group hierarchy.
This is needed by later hard limit patches where it is required to
know how many tasks go off the rq when a throttled group entity
is dequeued.
Signed-off-by: Bharata B Rao <bharata@...ux.vnet.ibm.com>
---
kernel/sched.c | 4 ++++
kernel/sched_debug.c | 2 ++
kernel/sched_fair.c | 23 +++++++++++++++++++++++
3 files changed, 29 insertions(+)
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -477,6 +477,10 @@ struct cfs_rq {
unsigned long rq_weight;
#endif
#endif
+ /*
+ * Number of tasks at this hierarchy level.
+ */
+ unsigned long nr_tasks_running;
};
/* Real-Time classes' related field in a runqueue: */
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -214,6 +214,8 @@ void print_cfs_rq(struct seq_file *m, in
#ifdef CONFIG_SMP
SEQ_printf(m, " .%-30s: %lu\n", "shares", cfs_rq->shares);
#endif
+ SEQ_printf(m, " .%-30s: %lu\n", "nr_tasks_running",
+ cfs_rq->nr_tasks_running);
print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -243,6 +243,27 @@ find_matching_se(struct sched_entity **s
#endif /* CONFIG_FAIR_GROUP_SCHED */
+static void add_cfs_rq_tasks_running(struct sched_entity *se,
+ unsigned long count)
+{
+ struct cfs_rq *cfs_rq;
+
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+ cfs_rq->nr_tasks_running += count;
+ }
+}
+
+static void sub_cfs_rq_tasks_running(struct sched_entity *se,
+ unsigned long count)
+{
+ struct cfs_rq *cfs_rq;
+
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+ cfs_rq->nr_tasks_running -= count;
+ }
+}
/**************************************************************
* Scheduling class tree data structure manipulation methods:
@@ -969,6 +990,7 @@ static void enqueue_task_fair(struct rq
wakeup = 1;
}
+ add_cfs_rq_tasks_running(&p->se, 1);
hrtick_update(rq);
}
@@ -991,6 +1013,7 @@ static void dequeue_task_fair(struct rq
sleep = 1;
}
+ sub_cfs_rq_tasks_running(&p->se, 1);
hrtick_update(rq);
}
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
Powered by blists - more mailing lists