Message-ID: <BLU436-SMTP50F620E045B145FB2C25A5C07C0@phx.gbl>
Date:	Fri, 14 Aug 2015 22:50:34 +0800
From:	"T. Zhou" <t.s.zhou@...mail.com>
To:	byungchul.park@....com
CC:	mingo@...nel.org, peterz@...radead.org,
	linux-kernel@...r.kernel.org, yuyang.du@...el.com
Subject: Re: [PATCH 2/2] sched: make task_move_group_fair simple by using
 switched_to(from)_fair

Hi,

On Thu, Aug 13, 2015 at 07:49:19PM +0900, byungchul.park@....com wrote:
> +static inline int need_vruntime_adjust(struct task_struct *p, int queued)
> +{
> +	struct sched_entity *se = &p->se;
> +
> +	/*
> +	 * When !queued, vruntime of the task has usually NOT been normalized.
> +	 * But there are some cases where it has already been normalized:
> +	 *
> +	 * - Moving a forked child which is waiting for being woken up by
> +	 *   wake_up_new_task().
> +	 * - Moving a task which has been woken up by try_to_wake_up() and
> +	 *   waiting for actually being woken up by sched_ttwu_pending().
> +	 */
> +	if (queued)
> +		return 0;
> +
> +	if (!se->sum_exec_runtime || p->state == TASK_WAKING)
> +		return 0;
> +
> +	if (p->state != TASK_RUNNING)
> +		return 1;
> +
> +	return 0;
> +}

The original switched_from/to_fair() do not include the following check:

	if (!se->sum_exec_runtime || p->state == TASK_WAKING)
		return 0;

I guess you're sure this check will always be false in the switched_from/to_fair() case?
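
For reference (if I read the diff right), the old and new conditions in
switched_from_fair() compare as:

	/* old */
	if (!task_on_rq_queued(p) && p->state != TASK_RUNNING)

	/* new */
	if (need_vruntime_adjust(p, task_on_rq_queued(p)))

which only match if a !queued, !TASK_RUNNING task can never reach here
with !se->sum_exec_runtime or p->state == TASK_WAKING.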

Thanks
>  static void switched_from_fair(struct rq *rq, struct task_struct *p)
>  {
>  	struct sched_entity *se = &p->se;
> @@ -7918,7 +7943,7 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
>  	 * have normalized the vruntime, if it's !queued, then only when
>  	 * the task is sleeping will it still have non-normalized vruntime.
>  	 */
> -	if (!task_on_rq_queued(p) && p->state != TASK_RUNNING) {
> +	if (need_vruntime_adjust(p, task_on_rq_queued(p))) {
>  		/*
>  		 * Fix up our vruntime so that the current sleep doesn't
>  		 * cause 'unlimited' sleep bonus.
> @@ -7939,7 +7964,6 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
>  static void switched_to_fair(struct rq *rq, struct task_struct *p)
>  {
>  	struct sched_entity *se = &p->se;
> -
>  #ifdef CONFIG_FAIR_GROUP_SCHED
>  	/*
>  	 * Since the real-depth could have been changed (only FAIR
> @@ -7964,7 +7988,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
>  		 * has non-normalized vruntime, if it's !queued, then it still has
>  		 * normalized vruntime.
>  		 */
> -		if (p->state != TASK_RUNNING)
> +		if (need_vruntime_adjust(p, task_on_rq_queued(p)))
>  			se->vruntime += cfs_rq_of(se)->min_vruntime;
>  		return;
>  	}
> @@ -8014,55 +8038,9 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
>  #ifdef CONFIG_FAIR_GROUP_SCHED
>  static void task_move_group_fair(struct task_struct *p, int queued)
>  {
> -	struct sched_entity *se = &p->se;
> -	struct cfs_rq *cfs_rq;
> -
> -	/*
> -	 * If the task was not on the rq at the time of this cgroup movement
> -	 * it must have been asleep, sleeping tasks keep their ->vruntime
> -	 * absolute on their old rq until wakeup (needed for the fair sleeper
> -	 * bonus in place_entity()).
> -	 *
> -	 * If it was on the rq, we've just 'preempted' it, which does convert
> -	 * ->vruntime to a relative base.
> -	 *
> -	 * Make sure both cases convert their relative position when migrating
> -	 * to another cgroup's rq. This does somewhat interfere with the
> -	 * fair sleeper stuff for the first placement, but who cares.
> -	 */
> -	/*
> -	 * When !queued, vruntime of the task has usually NOT been normalized.
> -	 * But there are some cases where it has already been normalized:
> -	 *
> -	 * - Moving a forked child which is waiting for being woken up by
> -	 *   wake_up_new_task().
> -	 * - Moving a task which has been woken up by try_to_wake_up() and
> -	 *   waiting for actually being woken up by sched_ttwu_pending().
> -	 *
> -	 * To prevent boost or penalty in the new cfs_rq caused by delta
> -	 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
> -	 */
> -	if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING))
> -		queued = 1;
> -
> -	cfs_rq = cfs_rq_of(se);
> -	if (!queued)
> -		se->vruntime -= cfs_rq->min_vruntime;
> -
> -#ifdef CONFIG_SMP
> -	/* synchronize task with its prev cfs_rq */
> -	detach_entity_load_avg(cfs_rq, se);
> -#endif
> +	switched_from_fair(task_rq(p), p);
>  	set_task_rq(p, task_cpu(p));
> -	se->depth = se->parent ? se->parent->depth + 1 : 0;
> -	cfs_rq = cfs_rq_of(se);
> -	if (!queued)
> -		se->vruntime += cfs_rq->min_vruntime;
> -
> -#ifdef CONFIG_SMP
> -	/* Virtually synchronize task with its new cfs_rq */
> -	attach_entity_load_avg(cfs_rq, se);
> -#endif
> +	switched_to_fair(task_rq(p), p);
>  }
>  
>  void free_fair_sched_group(struct task_group *tg)

-- 
Tao
