Message-ID: <20150514010913.GA4441@swordfish>
Date: Thu, 14 May 2015 10:09:13 +0900
From: Sergey Senozhatsky <sergey.senozhatsky.work@...il.com>
To: Tejun Heo <tj@...nel.org>
Cc: lizefan@...wei.com, cgroups@...r.kernel.org, mingo@...hat.com,
peterz@...radead.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/3] sched, cgroup: reorganize threadgroup locking

Hello,
On (05/13/15 16:35), Tejun Heo wrote:
[..]
> -static inline void threadgroup_lock(struct task_struct *tsk)
> +static inline void threadgroup_change_begin(struct task_struct *tsk)
> {
> - down_write(&tsk->signal->group_rwsem);
> + might_sleep();
I think cgroup_threadgroup_change_begin()->down_read() already does a
might_sleep() check, so the explicit might_sleep() here looks redundant.
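
For reference, down_read() in kernel/locking/rwsem.c already starts with
that check; roughly (paraphrased, the exact code differs between kernel
versions):

	void __sched down_read(struct rw_semaphore *sem)
	{
		might_sleep();		/* the check already happens here */
		rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

		LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
	}
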
-ss
> + cgroup_threadgroup_change_begin(tsk);
> }
>
> /**
> - * threadgroup_unlock - unlock threadgroup
> - * @tsk: member task of the threadgroup to unlock
> + * threadgroup_change_end - mark the end of changes to a threadgroup
> + * @tsk: task causing the changes
> *
> - * Reverse threadgroup_lock().
> + * See threadgroup_change_begin().
> */
> -static inline void threadgroup_unlock(struct task_struct *tsk)
> +static inline void threadgroup_change_end(struct task_struct *tsk)
> {
> - up_write(&tsk->signal->group_rwsem);
> + cgroup_threadgroup_change_end(tsk);
> }
> -#else
> -static inline void threadgroup_change_begin(struct task_struct *tsk) {}
> -static inline void threadgroup_change_end(struct task_struct *tsk) {}
> -static inline void threadgroup_lock(struct task_struct *tsk) {}
> -static inline void threadgroup_unlock(struct task_struct *tsk) {}
> -#endif
>
> #ifndef __HAVE_THREAD_FUNCTIONS
>
> diff --git a/kernel/cgroup.c b/kernel/cgroup.c
> index cfa27f9..9309452 100644
> --- a/kernel/cgroup.c
> +++ b/kernel/cgroup.c
> @@ -848,6 +848,48 @@ static struct css_set *find_css_set(struct css_set *old_cset,
> return cset;
> }
>
> +void cgroup_threadgroup_change_begin(struct task_struct *tsk)
> +{
> + down_read(&tsk->signal->group_rwsem);
> +}
> +
> +void cgroup_threadgroup_change_end(struct task_struct *tsk)
> +{
> + up_read(&tsk->signal->group_rwsem);
> +}
> +
> +/**
> + * threadgroup_lock - lock threadgroup
> + * @tsk: member task of the threadgroup to lock
> + *
> + * Lock the threadgroup @tsk belongs to. No new task is allowed to enter
> + * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
> + * change ->group_leader/pid. This is useful for cases where the threadgroup
> + * needs to stay stable across blockable operations.
> + *
> + * fork and exit explicitly call threadgroup_change_{begin|end}() for
> + * synchronization. While held, no new task will be added to threadgroup
> + * and no existing live task will have its PF_EXITING set.
> + *
> + * de_thread() does threadgroup_change_{begin|end}() when a non-leader
> + * sub-thread becomes a new leader.
> + */
> +static void threadgroup_lock(struct task_struct *tsk)
> +{
> + down_write(&tsk->signal->group_rwsem);
> +}
> +
> +/**
> + * threadgroup_unlock - unlock threadgroup
> + * @tsk: member task of the threadgroup to unlock
> + *
> + * Reverse threadgroup_lock().
> + */
> +static inline void threadgroup_unlock(struct task_struct *tsk)
> +{
> + up_write(&tsk->signal->group_rwsem);
> +}
> +
> static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
> {
> struct cgroup *root_cgrp = kf_root->kn->priv;
> --
> 2.1.0
>
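
To summarize the scheme the quoted hunks set up (my own illustration, not
code from the patch; example_clone_path()/example_attach_path() are
made-up names): fork/exit/de_thread() bracket thread-list changes with the
shared lock, while cgroup migration takes the exclusive lock so the group
stays stable across the attach:

	/* fork/exit/de_thread() side: shared (read) lock */
	static inline void example_clone_path(struct task_struct *tsk)
	{
		threadgroup_change_begin(tsk);	/* down_read(&tsk->signal->group_rwsem) */
		/* ... link the new thread into the group ... */
		threadgroup_change_end(tsk);	/* up_read() */
	}

	/* cgroup attach side: exclusive (write) lock */
	static void example_attach_path(struct task_struct *leader)
	{
		threadgroup_lock(leader);	/* down_write(&leader->signal->group_rwsem) */
		/* ... walk the now-stable thread list and migrate it ... */
		threadgroup_unlock(leader);	/* up_write() */
	}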