Message-Id: <1317583287-18300-10-git-send-email-glommer@parallels.com>
Date: Sun, 2 Oct 2011 23:21:26 +0400
From: Glauber Costa <glommer@...allels.com>
To: linux-kernel@...r.kernel.org
Cc: paul@...lmenage.org, lizf@...fujitsu.com, daniel.lezcano@...e.fr,
a.p.zijlstra@...llo.nl, jbottomley@...allels.com,
Glauber Costa <glommer@...allels.com>,
Balbir Singh <bsingharora@...il.com>
Subject: [PATCH 09/10] provide a version of cpuusage statistics inside cpu cgroup
For users interested in the information currently exported through
cpuacct.usage and cpuacct.usage_percpu, provide the same statistics
inside the cpu cgroup, as cpu.usage and cpu.usage_percpu. As with
cpuacct, writing 0 to cpu.usage resets the counters.
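
As an illustrative example (the mount point below is hypothetical),
assuming the cpu controller is mounted at /sys/fs/cgroup/cpu:

	# cat /sys/fs/cgroup/cpu/cpu.usage
	<total runtime of the group's tasks, in nanoseconds>
	# cat /sys/fs/cgroup/cpu/cpu.usage_percpu
	<one nanosecond counter per present cpu, space separated>
	# echo 0 > /sys/fs/cgroup/cpu/cpu.usage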
Signed-off-by: Glauber Costa <glommer@...allels.com>
CC: Balbir Singh <bsingharora@...il.com>
---
kernel/sched.c | 221 +++++++++++++++++++++++++++++++++++----------------
kernel/sched_fair.c | 2 +-
kernel/sched_rt.c | 2 +-
3 files changed, 156 insertions(+), 69 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 3f7c1fd..8ed9dd7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -279,6 +279,7 @@ struct task_group {
struct autogroup *autogroup;
#endif
struct kernel_stat __percpu *cpustat;
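+ /* cumulative runtime of this group's tasks, per cpu, in nanoseconds */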
+ u64 __percpu *cpuusage;
struct timespec start_time;
};
@@ -2080,6 +2081,8 @@ static int irqtime_account_si_update(void)
#endif
+static void cpuusage_charge(struct task_struct *tsk, u64 cputime);
+
#include "sched_idletask.c"
#include "sched_fair.c"
#include "sched_rt.c"
@@ -8130,8 +8133,10 @@ void __init sched_init(void)
root_task_group.start_time = (struct timespec){0, 0};
root_task_group.cpustat = alloc_percpu(struct kernel_stat);
+ root_task_group.cpuusage = alloc_percpu(u64);
/* Failing that early an allocation means we're screwed anyway */
BUG_ON(!root_task_group.cpustat);
+ BUG_ON(!root_task_group.cpuusage);
#endif /* CONFIG_CGROUP_SCHED */
for_each_possible_cpu(i) {
@@ -8566,7 +8571,10 @@ static void free_sched_group(struct task_group *tg)
free_fair_sched_group(tg);
free_rt_sched_group(tg);
autogroup_free(tg);
- free_percpu(tg->cpustat);
+ if (tg->cpustat)
+ free_percpu(tg->cpustat);
+ if (tg->cpuusage)
+ free_percpu(tg->cpuusage);
kfree(tg);
}
@@ -8587,6 +8595,10 @@ struct task_group *sched_create_group(struct task_group *parent)
if (!alloc_rt_sched_group(tg, parent))
goto err;
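+ /* on failure, the err path's free_sched_group() frees what was allocated */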
+ tg->cpuusage = alloc_percpu(u64);
+ if (!tg->cpuusage)
+ goto err;
+
tg->cpustat = alloc_percpu(struct kernel_stat);
if (!tg->cpustat)
goto err;
@@ -9106,6 +9118,88 @@ static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
}
#endif /* CONFIG_RT_GROUP_SCHED */
+static u64 cpuacct_cpuusage_read(u64 *cpuusage, int cpu)
+{
+ u64 data;
+
+#ifndef CONFIG_64BIT
+ /*
+ * Take rq->lock to make 64-bit read safe on 32-bit platforms.
+ */
+ raw_spin_lock_irq(&cpu_rq(cpu)->lock);
+ data = *cpuusage;
+ raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
+#else
+ data = *cpuusage;
+#endif
+
+ return data;
+}
+
+static void cpuacct_cpuusage_write(u64 *cpuusage, int cpu, u64 val)
+{
+#ifndef CONFIG_64BIT
+ /*
+ * Take rq->lock to make 64-bit write safe on 32-bit platforms.
+ */
+ raw_spin_lock_irq(&cpu_rq(cpu)->lock);
+ *cpuusage = val;
+ raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
+#else
+ *cpuusage = val;
+#endif
+}
+
+static u64 cpu_cgroup_cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
+{
+ struct task_group *tg = cgroup_tg(cgrp);
+ u64 totalcpuusage = 0;
+ int i;
+
+ for_each_present_cpu(i) {
+ u64 *cpuusage = per_cpu_ptr(tg->cpuusage, i);
+ totalcpuusage += cpuacct_cpuusage_read(cpuusage, i);
+ }
+
+ return totalcpuusage;
+}
+
+static int cpu_cgroup_cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
+ u64 reset)
+{
+ struct task_group *tg = cgroup_tg(cgrp);
+ int err = 0;
+ int i;
+
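+ /* only a write of 0 is accepted, as a request to reset the counters */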
+ if (reset) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ for_each_present_cpu(i) {
+ u64 *cpuusage = per_cpu_ptr(tg->cpuusage, i);
+ cpuacct_cpuusage_write(cpuusage, i, 0);
+ }
+
+out:
+ return err;
+}
+
+static int cpu_cgroup_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
+ struct seq_file *m)
+{
+ struct task_group *tg = cgroup_tg(cgroup);
+ u64 percpu;
+ int i;
+
+ for_each_present_cpu(i) {
+ u64 *cpuusage = per_cpu_ptr(tg->cpuusage, i);
+ percpu = cpuacct_cpuusage_read(cpuusage, i);
+ seq_printf(m, "%llu ", (unsigned long long) percpu);
+ }
+ seq_printf(m, "\n");
+ return 0;
+}
+
static const char *cpuacct_stat_desc[] = {
[CPUACCT_STAT_USER] = "user",
[CPUACCT_STAT_SYSTEM] = "system",
@@ -9168,6 +9262,15 @@ static struct cftype cpu_files[] = {
.name = "stat",
.read_map = cpu_cgroup_stats_show,
},
+ {
+ .name = "usage",
+ .read_u64 = cpu_cgroup_cpuusage_read,
+ .write_u64 = cpu_cgroup_cpuusage_write,
+ },
+ {
+ .name = "usage_percpu",
+ .read_seq_string = cpu_cgroup_percpu_seq_read,
+ },
};
static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
@@ -9450,41 +9553,6 @@ cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
kfree(ca);
}
-static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
-{
- u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
- u64 data;
-
-#ifndef CONFIG_64BIT
- /*
- * Take rq->lock to make 64-bit read safe on 32-bit platforms.
- */
- raw_spin_lock_irq(&cpu_rq(cpu)->lock);
- data = *cpuusage;
- raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
-#else
- data = *cpuusage;
-#endif
-
- return data;
-}
-
-static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
-{
- u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
-
-#ifndef CONFIG_64BIT
- /*
- * Take rq->lock to make 64-bit write safe on 32-bit platforms.
- */
- raw_spin_lock_irq(&cpu_rq(cpu)->lock);
- *cpuusage = val;
- raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
-#else
- *cpuusage = val;
-#endif
-}
-
/* return total cpu usage (in nanoseconds) of a group */
static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
{
@@ -9492,8 +9560,10 @@ static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
u64 totalcpuusage = 0;
int i;
- for_each_present_cpu(i)
- totalcpuusage += cpuacct_cpuusage_read(ca, i);
+ for_each_present_cpu(i) {
+ u64 *cpuusage = per_cpu_ptr(ca->cpuusage, i);
+ totalcpuusage += cpuacct_cpuusage_read(cpuusage, i);
+ }
return totalcpuusage;
}
@@ -9510,8 +9580,10 @@ static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
goto out;
}
- for_each_present_cpu(i)
- cpuacct_cpuusage_write(ca, i, 0);
+ for_each_present_cpu(i) {
+ u64 *cpuusage = per_cpu_ptr(ca->cpuusage, i);
+ cpuacct_cpuusage_write(cpuusage, i, 0);
+ }
out:
return err;
@@ -9568,33 +9640,6 @@ static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
}
/*
- * charge this task's execution time to its accounting group.
- *
- * called with rq->lock held.
- */
-static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
-{
- struct cpuacct *ca;
- int cpu;
-
- if (unlikely(!cpuacct_subsys.active))
- return;
-
- cpu = task_cpu(tsk);
-
- rcu_read_lock();
-
- ca = task_ca(tsk);
-
- for (; ca; ca = ca->parent) {
- u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
- *cpuusage += cputime;
- }
-
- rcu_read_unlock();
-}
-
-/*
* When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
* in cputime_t units. As a result, cpuacct_update_stats calls
* percpu_counter_add with values large enough to always overflow the
@@ -9642,3 +9687,45 @@ struct cgroup_subsys cpuacct_subsys = {
};
#endif /* CONFIG_CGROUP_CPUACCT */
+/*
+ * charge this task's execution time to its accounting group.
+ *
+ * called with rq->lock held.
+ */
+static void cpuusage_charge(struct task_struct *tsk, u64 cputime)
+{
+ int cpu;
+
+#ifdef CONFIG_CGROUP_CPUACCT
+ struct cpuacct *ca;
+#endif
+#ifdef CONFIG_CGROUP_SCHED
+ struct task_group *tg;
+#endif
+ cpu = task_cpu(tsk);
+
+ rcu_read_lock();
+
+#ifdef CONFIG_CGROUP_CPUACCT
+ if (unlikely(!cpuacct_subsys.active))
+ goto no_cpuacct;
+
+ /* charge every cpuacct group in the hierarchy, up to the root */
+ for (ca = task_ca(tsk); ca; ca = ca->parent) {
+ u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
+ *cpuusage += cputime;
+ }
+no_cpuacct:
+#endif
+
+#ifdef CONFIG_CGROUP_SCHED
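+ /* likewise, walk the cpu cgroup hierarchy charging each task_group */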
+ tg = task_group(tsk);
+ for (; tg; tg = tg->parent) {
+ u64 *cpuusage = per_cpu_ptr(tg->cpuusage, cpu);
+ *cpuusage += cputime;
+ }
+#endif
+ rcu_read_unlock();
+}
+
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index bc8ee99..38b4549 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -580,7 +580,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
struct task_struct *curtask = task_of(curr);
trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
- cpuacct_charge(curtask, delta_exec);
+ cpuusage_charge(curtask, delta_exec);
account_group_exec_runtime(curtask, delta_exec);
}
}
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 97540f0..a21b58e 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -676,7 +676,7 @@ static void update_curr_rt(struct rq *rq)
account_group_exec_runtime(curr, delta_exec);
curr->se.exec_start = rq->clock_task;
- cpuacct_charge(curr, delta_exec);
+ cpuusage_charge(curr, delta_exec);
sched_rt_avg_update(rq, delta_exec);
--
1.7.6