Message-ID: <tip-8f0dfc34e9b323a028c2ec41abb7e9de477b7a94@git.kernel.org>
Date: Wed, 2 Sep 2009 07:00:46 GMT
From: tip-bot for Arjan van de Ven <arjan@...ux.intel.com>
To: linux-tip-commits@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, hpa@...or.com, mingo@...hat.com,
a.p.zijlstra@...llo.nl, arjan@...ux.intel.com, tglx@...utronix.de,
mingo@...e.hu
Subject: [tip:sched/core] sched: Provide iowait counters
Commit-ID: 8f0dfc34e9b323a028c2ec41abb7e9de477b7a94
Gitweb: http://git.kernel.org/tip/8f0dfc34e9b323a028c2ec41abb7e9de477b7a94
Author: Arjan van de Ven <arjan@...ux.intel.com>
AuthorDate: Mon, 20 Jul 2009 11:26:58 -0700
Committer: Ingo Molnar <mingo@...e.hu>
CommitDate: Wed, 2 Sep 2009 08:44:08 +0200
sched: Provide iowait counters
For counting how long an application has been waiting for
(disk) IO, only HZ-sample-driven information is currently
available, while for all other counters in this class a
high-resolution version is available via CONFIG_SCHEDSTATS.

In order to make an improved bootchart tool possible, we also
need a higher-resolution version of the iowait time.

This patch adds that scheduler statistic to the kernel.
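
[ Usage sketch from the editor, not part of the patch: with
  CONFIG_SCHEDSTATS and CONFIG_SCHED_DEBUG enabled, the two new
  fields appear in /proc/<pid>/sched via the sched_debug hunk
  below, so a bootchart-style tool could poll them along these
  lines: ]

/*
 * Illustrative only: dump the new se.iowait_sum / se.iowait_count
 * fields from /proc/<pid>/sched. Assumes CONFIG_SCHEDSTATS and
 * CONFIG_SCHED_DEBUG are enabled; PN() prints iowait_sum in
 * milliseconds, P() prints the raw count.
 */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	char path[64], line[256];
	FILE *f;

	/* default to the calling task; pass a pid to inspect another */
	snprintf(path, sizeof(path), "/proc/%s/sched",
		 argc > 1 ? argv[1] : "self");
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "iowait"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}

[ Writing to the same /proc/<pid>/sched file triggers
  proc_sched_set_task(), which the second sched_debug hunk extends
  to zero the new fields, so the counters can be reset between
  samples. ]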
Signed-off-by: Arjan van de Ven <arjan@...ux.intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@...llo.nl>
LKML-Reference: <4A64B813.1080506@...ux.intel.com>
Signed-off-by: Ingo Molnar <mingo@...e.hu>
---
include/linux/sched.h | 4 ++++
kernel/sched.c | 4 ++++
kernel/sched_debug.c | 4 ++++
kernel/sched_fair.c | 5 +++++
4 files changed, 17 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e209ae0..9c96ef2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1111,6 +1111,8 @@ struct sched_entity {
u64 wait_max;
u64 wait_count;
u64 wait_sum;
+ u64 iowait_count;
+ u64 iowait_sum;
u64 sleep_start;
u64 sleep_max;
@@ -1231,6 +1233,8 @@ struct task_struct {
unsigned did_exec:1;
unsigned in_execve:1; /* Tell the LSMs that the process is doing an
* execve */
+ unsigned in_iowait:1;
+
/* Revert to default priority/policy when forking */
unsigned sched_reset_on_fork:1;
diff --git a/kernel/sched.c b/kernel/sched.c
index 6244d24..38d05a8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6754,7 +6754,9 @@ void __sched io_schedule(void)
delayacct_blkio_start();
atomic_inc(&rq->nr_iowait);
+ current->in_iowait = 1;
schedule();
+ current->in_iowait = 0;
atomic_dec(&rq->nr_iowait);
delayacct_blkio_end();
}
@@ -6767,7 +6769,9 @@ long __sched io_schedule_timeout(long timeout)
delayacct_blkio_start();
atomic_inc(&rq->nr_iowait);
+ current->in_iowait = 1;
ret = schedule_timeout(timeout);
+ current->in_iowait = 0;
atomic_dec(&rq->nr_iowait);
delayacct_blkio_end();
return ret;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 70c7e0b..5ddbd08 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -409,6 +409,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
PN(se.wait_max);
PN(se.wait_sum);
P(se.wait_count);
+ PN(se.iowait_sum);
+ P(se.iowait_count);
P(sched_info.bkl_count);
P(se.nr_migrations);
P(se.nr_migrations_cold);
@@ -479,6 +481,8 @@ void proc_sched_set_task(struct task_struct *p)
p->se.wait_max = 0;
p->se.wait_sum = 0;
p->se.wait_count = 0;
+ p->se.iowait_sum = 0;
+ p->se.iowait_count = 0;
p->se.sleep_max = 0;
p->se.sum_sleep_runtime = 0;
p->se.block_max = 0;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 342000b..471fa28 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -652,6 +652,11 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->sum_sleep_runtime += delta;
if (tsk) {
+ if (tsk->in_iowait) {
+ se->iowait_sum += delta;
+ se->iowait_count++;
+ }
+
/*
* Blocking time is in units of nanosecs, so shift by
* 20 to get a milliseconds-range estimation of the
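
[ Editor's note on the accounting: io_schedule() sets
  current->in_iowait around schedule(), and when the task is woken,
  enqueue_sleeper() credits the entire blocked interval (in
  nanoseconds) to iowait_sum. Below is a stand-alone model of that
  path; the _model names are mine and the scheduler clock is
  stubbed with clock_gettime(), so this is a sketch of the logic,
  not kernel code: ]

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct sched_entity_model {
	uint64_t sleep_start;	/* ns timestamp when the task blocked */
	uint64_t iowait_sum;	/* total ns slept while in_iowait was set */
	uint64_t iowait_count;	/* number of iowait sleeps */
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/*
 * Mirrors the enqueue_sleeper() hunk above: credit the whole
 * blocked interval to iowait iff the task had in_iowait set,
 * as io_schedule() does around schedule().
 */
static void enqueue_sleeper_model(struct sched_entity_model *se, int in_iowait)
{
	uint64_t delta = now_ns() - se->sleep_start;

	if (in_iowait) {
		se->iowait_sum += delta;
		se->iowait_count++;
	}
}

int main(void)
{
	struct sched_entity_model se = { .sleep_start = now_ns() };
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 10 * 1000 * 1000 };

	nanosleep(&ts, NULL);		/* stands in for the blocked interval */
	enqueue_sleeper_model(&se, 1);	/* as if woken out of io_schedule() */
	printf("iowait_sum=%llu ns, iowait_count=%llu\n",
	       (unsigned long long)se.iowait_sum,
	       (unsigned long long)se.iowait_count);
	return 0;
}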