Date:	Tue, 12 Aug 2014 09:55:23 +0200
From:	Peter Zijlstra <peterz@...radead.org>
To:	Kirill Tkhai <ktkhai@...allels.com>
Cc:	linux-kernel@...r.kernel.org, pjt@...gle.com, oleg@...hat.com,
	rostedt@...dmis.org, umgwanakikbuti@...il.com, tkhai@...dex.ru,
	tim.c.chen@...ux.intel.com, mingo@...nel.org,
	nicolas.pitre@...aro.org
Subject: Re: [PATCH v4 3/6] sched: Teach scheduler to understand
 ONRQ_MIGRATING state

On Wed, Aug 06, 2014 at 12:06:19PM +0400, Kirill Tkhai wrote:
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -331,9 +331,13 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
>  	lockdep_assert_held(&p->pi_lock);
>  
>  	for (;;) {
> +		while (unlikely(task_migrating(p)))
> +			cpu_relax();
> +
>  		rq = task_rq(p);
>  		raw_spin_lock(&rq->lock);
> -		if (likely(rq == task_rq(p)))
> +		if (likely(rq == task_rq(p) &&
> +			   !task_migrating(p)))
>  			return rq;
>  		raw_spin_unlock(&rq->lock);
>  	}
> @@ -349,10 +353,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
>  	struct rq *rq;
>  
>  	for (;;) {
> +		while (unlikely(task_migrating(p)))
> +			cpu_relax();
> +
>  		raw_spin_lock_irqsave(&p->pi_lock, *flags);
>  		rq = task_rq(p);
>  		raw_spin_lock(&rq->lock);
> -		if (likely(rq == task_rq(p)))
> +		if (likely(rq == task_rq(p) &&
> +			   !task_migrating(p)))
>  			return rq;
>  		raw_spin_unlock(&rq->lock);
>  		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);

I know I suggested that, but I changed it as per the below. The advantage
is not having two task_migrating() tests on the likely path.

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -331,15 +331,15 @@ static inline struct rq *__task_rq_lock(
 	lockdep_assert_held(&p->pi_lock);
 
 	for (;;) {
-		while (unlikely(task_migrating(p)))
-			cpu_relax();
-
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
 		if (likely(rq == task_rq(p) &&
 			   !task_migrating(p)))
 			return rq;
 		raw_spin_unlock(&rq->lock);
+
+		while (unlikely(task_migrating(p)))
+			cpu_relax();
 	}
 }
 
@@ -353,9 +353,6 @@ static struct rq *task_rq_lock(struct ta
 	struct rq *rq;
 
 	for (;;) {
-		while (unlikely(task_migrating(p)))
-			cpu_relax();
-
 		raw_spin_lock_irqsave(&p->pi_lock, *flags);
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
@@ -364,6 +361,9 @@ static struct rq *task_rq_lock(struct ta
 			return rq;
 		raw_spin_unlock(&rq->lock);
 		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+
+		while (unlikely(task_migrating(p)))
+			cpu_relax();
 	}
 }
 

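For reference, this is roughly what __task_rq_lock() ends up looking like
with the above applied (reconstructed from the hunks; the local declaration
and the __acquires() annotation are assumed from the surrounding kernel
source):

static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		/* Likely path: a single task_migrating() test, under rq->lock. */
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p) &&
			   !task_migrating(p)))
			return rq;
		raw_spin_unlock(&rq->lock);

		/* Lost the race; wait out the migration before retrying. */
		while (unlikely(task_migrating(p)))
			cpu_relax();
	}
}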