Message-ID: <AANLkTikBEbAhN_0SSz76SMewuUueOrE7K7cYr=zS3DB0@mail.gmail.com>
Date: Sun, 13 Feb 2011 20:32:58 -0800
From: Paul Menage <menage@...gle.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: balbir@...ux.vnet.ibm.com, eranian@...gle.com,
linux-kernel@...r.kernel.org, mingo@...e.hu, paulus@...ba.org,
davem@...emloft.net, fweisbec@...il.com,
perfmon2-devel@...ts.sf.net, eranian@...il.com,
robert.richter@....com, acme@...hat.com, lizf@...fujitsu.com
Subject: Re: [RFC][PATCH] cgroup: Fix cgroup_subsys::exit callback
On Tue, Feb 8, 2011 at 2:24 AM, Peter Zijlstra <peterz@...radead.org> wrote:
>
> And here I thought Google was starting to understand what community
> participation meant.. is there anybody you know who can play a more
> active role in the whole cgroup thing?
Google has plenty of people actively working on various aspects of
cgroups (particularly memory and I/O scheduling) in mainline. There's
no one tasked with core cgroups framework maintenance, but there are
folks from Fujitsu and IBM (Li Zefan, Balbir Singh, etc.) who have more
experience and stake in the core framework than anyone else at Google
anyway.
>
> Like the below? Both the perf and sched exit callbacks are fine with
> being called under task_lock afaict, but I haven't actually run with
> lockdep enabled to see if I missed something.
Sounds good to me.
Acked-by: Paul Menage <menage@...gle.com>
>
> I also pondered doing the cgroup exit from __put_task_struct() but that
> had another problem iirc.
My guess is that by that time, so much of the task's context has been
destroyed that it comes too late in the tear-down for some/many
subsystems? Proving that guess either true or false (for all current
and future potential subsystems) would probably be tricky, though.
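(For context, roughly how the ordering looks to me -- from a quick read
of do_exit(), so take the details with a grain of salt:

    do_exit()
        exit_mm(tsk);        /* address space torn down */
        exit_files(tsk);     /* file table torn down */
        exit_fs(tsk);
        ...
        cgroup_exit(tsk, 1); /* current callback site */
        ...
    /* possibly much later, once the last reference is dropped: */
    put_task_struct() -> __put_task_struct()  /* proposed site: most
                                                 task state already gone */

so any subsystem whose exit callback wants to look at mm, files, etc.
would already be out of luck by __put_task_struct() time.)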
>
> ---
> Index: linux-2.6/include/linux/cgroup.h
> ===================================================================
> --- linux-2.6.orig/include/linux/cgroup.h
> +++ linux-2.6/include/linux/cgroup.h
> @@ -474,7 +474,8 @@ struct cgroup_subsys {
> struct cgroup *old_cgrp, struct task_struct *tsk,
> bool threadgroup);
> void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
> - void (*exit)(struct cgroup_subsys *ss, struct task_struct *task);
> + void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp,
> + struct cgroup *old_cgrp, struct task_struct *task);
> int (*populate)(struct cgroup_subsys *ss,
> struct cgroup *cgrp);
> void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp);
> Index: linux-2.6/kernel/cgroup.c
> ===================================================================
Probably also worth including a note in the docs:
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -602,6 +602,7 @@ void fork(struct cgroup_subsys *ss, struct task_struct *task)
Called when a task is forked into a cgroup.
void exit(struct cgroup_subsys *ss, struct task_struct *task)
+(task->alloc_lock held by caller)
Called during task exit.
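With the widened signature, a subsystem callback would end up looking
roughly like the sketch below (foo_cgroup_exit and its body are
hypothetical, just to illustrate the signature and the locking rule):

    static void foo_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
                                struct cgroup *old_cgrp, struct task_struct *task)
    {
            /* Caller (cgroup_exit()) holds task_lock(task), i.e.
             * task->alloc_lock, so task->cgroups is stable here. */
            lockdep_assert_held(&task->alloc_lock);

            /* old_cgrp is the group the task is leaving; cgrp is the
             * group it has just been reassigned to for the rest of exit. */
            if (cgrp == old_cgrp)
                    return;

            /* e.g. fold the task's per-cgroup state from old_cgrp into cgrp */
    }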
> --- linux-2.6.orig/kernel/cgroup.c
> +++ linux-2.6/kernel/cgroup.c
> @@ -4230,20 +4230,8 @@ void cgroup_post_fork(struct task_struct
> */
> void cgroup_exit(struct task_struct *tsk, int run_callbacks)
> {
> - int i;
> struct css_set *cg;
> -
> - if (run_callbacks && need_forkexit_callback) {
> - /*
> - * modular subsystems can't use callbacks, so no need to lock
> - * the subsys array
> - */
> - for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
> - struct cgroup_subsys *ss = subsys[i];
> - if (ss->exit)
> - ss->exit(ss, tsk);
> - }
> - }
> + int i;
>
> /*
> * Unlink from the css_set task list if necessary.
> @@ -4261,7 +4249,24 @@ void cgroup_exit(struct task_struct *tsk
> task_lock(tsk);
> cg = tsk->cgroups;
> tsk->cgroups = &init_css_set;
> +
> + if (run_callbacks && need_forkexit_callback) {
> + /*
> + * modular subsystems can't use callbacks, so no need to lock
> + * the subsys array
> + */
> + for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
> + struct cgroup_subsys *ss = subsys[i];
> + if (ss->exit) {
> + struct cgroup *old_cgrp =
> + rcu_dereference_raw(cg->subsys[i])->cgroup;
> + struct cgroup *cgrp = task_cgroup(tsk, i);
> + ss->exit(ss, cgrp, old_cgrp, tsk);
> + }
> + }
> + }
> task_unlock(tsk);
> +
> if (cg)
> put_css_set_taskexit(cg);
> }
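To spell out what the reordering buys: as far as I can tell, a callback
invoked from the hunk above can now assume roughly the following:

    /* Inside ss->exit(), with this patch applied:
     *  - task_lock(tsk) is held (tsk->alloc_lock)
     *  - tsk->cgroups has already been flipped to &init_css_set, so
     *    cgrp = task_cgroup(tsk, i) is the subsystem's root group
     *  - old_cgrp comes from the saved cg, which isn't released until
     *    put_css_set_taskexit(cg) below, so it can't go away under us
     */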
> Index: linux-2.6/kernel/sched.c
> ===================================================================
> --- linux-2.6.orig/kernel/sched.c
> +++ linux-2.6/kernel/sched.c
> @@ -606,9 +606,6 @@ static inline struct task_group *task_gr
> struct task_group *tg;
> struct cgroup_subsys_state *css;
>
> - if (p->flags & PF_EXITING)
> - return &root_task_group;
> -
> css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
> lockdep_is_held(&task_rq(p)->lock));
> tg = container_of(css, struct task_group, css);
> @@ -9081,7 +9078,8 @@ cpu_cgroup_attach(struct cgroup_subsys *
> }
>
> static void
> -cpu_cgroup_exit(struct cgroup_subsys *ss, struct task_struct *task)
> +cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
> + struct cgroup *old_cgrp, struct task_struct *task)
> {
> /*
> * cgroup_exit() is called in the copy_process() failure path.
>
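(The quote cuts off inside that comment; presumably the body goes on to
skip the copy_process() failure case and otherwise calls
sched_move_task(task) to park the exiting task in the root group, which
is what makes the PF_EXITING special case in task_group() removable.
Something like:

    if (!(task->flags & PF_EXITING))
            return;         /* half-constructed task from copy_process() */

    sched_move_task(task);  /* reattach to the root task_group */

but I'm inferring that from the task_group() hunk above.)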
>
>