Message-ID: <20150206160934.0663505e@gandalf.local.home>
Date:	Fri, 6 Feb 2015 16:09:34 -0500
From:	Steven Rostedt <rostedt@...dmis.org>
To:	Xunlei Pang <xlpang@....com>
Cc:	linux-kernel@...r.kernel.org,
	Peter Zijlstra <peterz@...radead.org>,
	Juri Lelli <juri.lelli@...il.com>,
	Xunlei Pang <pang.xunlei@...aro.org>
Subject: Re: [PATCH v2 1/2] sched/rt: Check to push the task when changing
 its affinity

On Thu,  5 Feb 2015 23:59:33 +0800
Xunlei Pang <xlpang@....com> wrote:
>  	return p;
> @@ -1886,28 +1892,73 @@ static void set_cpus_allowed_rt(struct task_struct *p,
>  				const struct cpumask *new_mask)
>  {
>  	struct rq *rq;
> -	int weight;
> +	int old_weight, new_weight;
> +	int preempt_push = 0, direct_push = 0;
>  
>  	BUG_ON(!rt_task(p));
>  
>  	if (!task_on_rq_queued(p))
>  		return;
>  
> -	weight = cpumask_weight(new_mask);
> +	old_weight = p->nr_cpus_allowed;
> +	new_weight = cpumask_weight(new_mask);
> +
> +	rq = task_rq(p);
> +
> +	if (new_weight > 1 &&
> +	    rt_task(rq->curr) &&
> +	    !test_tsk_need_resched(rq->curr)) {
> +		/*
> +		 * Set new mask information which is already valid
> +		 * to prepare pushing.
> +		 *
> +		 * We own p->pi_lock and rq->lock. rq->lock might
> +		 * get released when doing direct pushing, however
> +		 * p->pi_lock is always held, so it's safe to assign
> +		 * the new_mask and new_weight to p.
> +		 */
> +		cpumask_copy(&p->cpus_allowed, new_mask);
> +		p->nr_cpus_allowed = new_weight;
> +
> +		if (task_running(rq, p) &&
> +		    cpumask_test_cpu(task_cpu(p), new_mask) &&

Why the check for task_cpu being in new_mask?

> +		    cpupri_find(&rq->rd->cpupri, p, NULL)) {
> +			/*
> +			 * At this point, current task gets migratable most
> +			 * likely due to the change of its affinity, let's
> +			 * figure out if we can migrate it.
> +			 *
> +			 * Is there any task with the same priority as that
> +			 * of current task? If found one, we should resched.
> +			 * NOTE: The target may be unpushable.
> +			 */
> +			if (p->prio == rq->rt.highest_prio.next) {
> +				/* One target just in pushable_tasks list. */
> +				requeue_task_rt(rq, p, 0);
> +				preempt_push = 1;
> +			} else if (rq->rt.rt_nr_total > 1) {
> +				struct task_struct *next;
> +
> +				requeue_task_rt(rq, p, 0);
> +				next = peek_next_task_rt(rq);
> +				if (next != p && next->prio == p->prio)
> +					preempt_push = 1;
> +			}
> +		} else if (!task_running(rq, p))
> +			direct_push = 1;

We could avoid the second check (!task_running()) by splitting up the
first if:

	if (task_running(rq, p)) {
		if (cpumask_test_cpu(task_cpu(p), new_mask) &&
		    cpupri_find(&rq->rd->cpupri, p, NULL)) {
			[...]
		}
	} else {
		direct_push = 1;
	}

Also, is the copy of cpus_allowed only done so that cpupri_find() can be
called? If so, maybe move it in there too:

	if (task_running(rq, p)) {
		if (!cpumask_test_cpu(task_cpu(p), new_mask))
			goto update;

		cpumask_copy(&p->cpus_allowed, new_mask);
		p->nr_cpus_allowed = new_weight;

		if (!cpupri_find(&rq->rd->cpupri, p, NULL))
			goto update;

		[...]

This way we avoid the double copy of the cpumask unless we truly need to
do it.
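
Putting the two together, something like this (completely untested, just
to sketch the flow; "update" here would be a new label added in front of
the weight/migration accounting further down):

	if (new_weight > 1 &&
	    rt_task(rq->curr) &&
	    !test_tsk_need_resched(rq->curr)) {

		if (task_running(rq, p)) {
			if (!cpumask_test_cpu(task_cpu(p), new_mask))
				goto update;

			/* Only copy the mask once we know we may push p */
			cpumask_copy(&p->cpus_allowed, new_mask);
			p->nr_cpus_allowed = new_weight;

			if (!cpupri_find(&rq->rd->cpupri, p, NULL))
				goto update;

			if (p->prio == rq->rt.highest_prio.next) {
				/* One target just in pushable_tasks list. */
				requeue_task_rt(rq, p, 0);
				preempt_push = 1;
			} else if (rq->rt.rt_nr_total > 1) {
				struct task_struct *next;

				requeue_task_rt(rq, p, 0);
				next = peek_next_task_rt(rq);
				if (next != p && next->prio == p->prio)
					preempt_push = 1;
			}
		} else {
			/* Not running, but pushing still needs the new mask */
			cpumask_copy(&p->cpus_allowed, new_mask);
			p->nr_cpus_allowed = new_weight;
			direct_push = 1;
		}
	}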

> +	}
>  
>  	/*
>  	 * Only update if the process changes its state from whether it
>  	 * can migrate or not.
>  	 */
> -	if ((p->nr_cpus_allowed > 1) == (weight > 1))
> -		return;
> -
> -	rq = task_rq(p);
> +	if ((old_weight > 1) == (new_weight > 1))
> +		goto out;
>  
>  	/*
>  	 * The process used to be able to migrate OR it can now migrate
>  	 */
> -	if (weight <= 1) {
> +	if (new_weight <= 1) {
>  		if (!task_current(rq, p))
>  			dequeue_pushable_task(rq, p);
>  		BUG_ON(!rq->rt.rt_nr_migratory);
> @@ -1919,6 +1970,15 @@ static void set_cpus_allowed_rt(struct task_struct *p,
>  	}
>  
>  	update_rt_migration(&rq->rt);
> +
> +out:
> +	BUG_ON(direct_push == 1 && preempt_push == 1);

Do we really need this BUG_ON()?

> +
> +	if (direct_push)
> +		push_rt_tasks(rq);
> +
> +	if (preempt_push)

We could make that an "else if" if they really are mutually exclusive.
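That is (if they are indeed mutually exclusive), something like:

	if (direct_push)
		push_rt_tasks(rq);
	else if (preempt_push)
		resched_curr(rq);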

-- Steve

> +		resched_curr(rq);
>  }
>  
>  /* Assumes rq->lock is held */

