Message-ID: <20101229143136.GC2728@zhy>
Date:	Wed, 29 Dec 2010 22:31:36 +0800
From:	Yong Zhang <yong.zhang0@...il.com>
To:	Peter Zijlstra <a.p.zijlstra@...llo.nl>
Cc:	Chris Mason <chris.mason@...cle.com>,
	Frank Rowand <frank.rowand@...sony.com>,
	Ingo Molnar <mingo@...e.hu>,
	Thomas Gleixner <tglx@...utronix.de>,
	Mike Galbraith <efault@....de>,
	Oleg Nesterov <oleg@...hat.com>, Paul Turner <pjt@...gle.com>,
	Jens Axboe <axboe@...nel.dk>,
	Steven Rostedt <rostedt@...dmis.org>,
	linux-kernel@...r.kernel.org
Subject: Re: [RFC][PATCH 08/17] sched: Drop the rq argument to
 sched_class::select_task_rq()

On Fri, Dec 24, 2010 at 01:23:46PM +0100, Peter Zijlstra wrote:
> In preparation of calling select_task_rq() without rq->lock held, drop
> the dependency on the rq argument.
> 
> Signed-off-by: Peter Zijlstra <a.p.zijlstra@...llo.nl>
> ---
> @@ -3416,27 +3409,22 @@ void sched_exec(void)
>  {
>  	struct task_struct *p = current;
>  	unsigned long flags;
> -	struct rq *rq;
>  	int dest_cpu;
>  
> -	rq = task_rq_lock(p, &flags);
> -	dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
> +	raw_spin_lock_irqsave(&p->pi_lock, flags);

Seems like this belongs in patch 07/17 ;)

> +	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
>  	if (dest_cpu == smp_processor_id())
>  		goto unlock;
>  
> -	/*
> -	 * select_task_rq() can race against ->cpus_allowed
> -	 */
> -	if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
> -	    likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) {
> +	if (likely(cpu_active(dest_cpu)) && need_migrate_task(p)) {

If we drop rq->lock, need_migrate_task() may return true while
p is already running on another cpu, so we end up doing a wrong
migration call.
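
Something like this (a simplified sketch of the interleaving I mean;
I'm assuming need_migrate_task() still only looks at the on_rq /
task_running state and does not take rq->lock):

	CPU doing the check (pi_lock only)	another CPU
	----------------------------------	-----------
	dest_cpu = p->sched_class->select_task_rq(p, ...);
	need_migrate_task(p)	/* returns true */
						p gets selected / migrated and
						is now running over there;
						nothing prevents this, since we
						don't hold its rq->lock
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
		/* task_cpu(p) is stale here, so we issue a migration
		   for a task that is already running elsewhere */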

Thanks,
Yong
>  		struct migration_arg arg = { p, dest_cpu };
>  
> -		task_rq_unlock(rq, &flags);
> -		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
> +		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
> +		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
>  		return;
>  	}
>  unlock:
> -	task_rq_unlock(rq, &flags);
> +	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
>  }
>  
>  #endif
> @@ -5681,7 +5669,7 @@ int set_cpus_allowed_ptr(struct task_str
>  		goto out;
>  
>  	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
> -	if (migrate_task(p, rq)) {
> +	if (need_migrate_task(p)) {
>  		struct migration_arg arg = { p, dest_cpu };
>  		/* Need help from migration thread: drop lock and wait. */
>  		__task_rq_unlock(rq);
> Index: linux-2.6/kernel/sched_fair.c
> ===================================================================
> --- linux-2.6.orig/kernel/sched_fair.c
> +++ linux-2.6/kernel/sched_fair.c
> @@ -1623,7 +1623,7 @@ static int select_idle_sibling(struct ta
>   * preempt must be disabled.
>   */
>  static int
> -select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
> +select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
>  {
>  	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
>  	int cpu = smp_processor_id();
> Index: linux-2.6/kernel/sched_idletask.c
> ===================================================================
> --- linux-2.6.orig/kernel/sched_idletask.c
> +++ linux-2.6/kernel/sched_idletask.c
> @@ -7,7 +7,7 @@
>  
>  #ifdef CONFIG_SMP
>  static int
> -select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
> +select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
>  {
>  	return task_cpu(p); /* IDLE tasks as never migrated */
>  }
> Index: linux-2.6/kernel/sched_rt.c
> ===================================================================
> --- linux-2.6.orig/kernel/sched_rt.c
> +++ linux-2.6/kernel/sched_rt.c
> @@ -973,11 +973,18 @@ static void yield_task_rt(struct rq *rq)
>  static int find_lowest_rq(struct task_struct *task);
>  
>  static int
> -select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
> +select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
>  {
>  	if (sd_flag != SD_BALANCE_WAKE)
>  		return smp_processor_id();
>  
> +#if 0
> +	/*
> +	 * XXX without holding rq->lock the below is racy, need to
> +	 * rewrite it in a racy but non-dangerous way so that we mostly
> +	 * get the benefit of the heuristic but don't crash the kernel
> +	 * if we get it wrong ;-)
> +	 */
>  	/*
>  	 * If the current task is an RT task, then
>  	 * try to see if we can wake this RT task up on another
> @@ -1002,6 +1009,7 @@ select_task_rq_rt(struct rq *rq, struct 
>  
>  		return (cpu == -1) ? task_cpu(p) : cpu;
>  	}
> +#endif
>  
>  	/*
>  	 * Otherwise, just let it ride on the affined RQ and the
> Index: linux-2.6/kernel/sched_stoptask.c
> ===================================================================
> --- linux-2.6.orig/kernel/sched_stoptask.c
> +++ linux-2.6/kernel/sched_stoptask.c
> @@ -9,8 +9,7 @@
>  
>  #ifdef CONFIG_SMP
>  static int
> -select_task_rq_stop(struct rq *rq, struct task_struct *p,
> -		    int sd_flag, int flags)
> +select_task_rq_stop(struct task_struct *p, int sd_flag, int flags)
>  {
>  	return task_cpu(p); /* stop tasks as never migrate */
>  }
