Date:   Tue, 14 Apr 2020 15:35:59 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     vpillai <vpillai@...italocean.com>
Cc:     Nishanth Aravamudan <naravamudan@...italocean.com>,
        Julien Desfossez <jdesfossez@...italocean.com>,
        Tim Chen <tim.c.chen@...ux.intel.com>, mingo@...nel.org,
        tglx@...utronix.de, pjt@...gle.com, torvalds@...ux-foundation.org,
        linux-kernel@...r.kernel.org, fweisbec@...il.com,
        keescook@...omium.org, kerrnel@...gle.com,
        Phil Auld <pauld@...hat.com>, Aaron Lu <aaron.lwe@...il.com>,
        Aubrey Li <aubrey.intel@...il.com>, aubrey.li@...ux.intel.com,
        Valentin Schneider <valentin.schneider@....com>,
        Mel Gorman <mgorman@...hsingularity.net>,
        Pawan Gupta <pawan.kumar.gupta@...ux.intel.com>,
        Paolo Bonzini <pbonzini@...hat.com>,
        Joel Fernandes <joelaf@...gle.com>, joel@...lfernandes.org,
        Aaron Lu <aaron.lu@...ux.alibaba.com>
Subject: Re: [RFC PATCH 07/13] sched: Add core wide task selection and
 scheduling.

On Wed, Mar 04, 2020 at 04:59:57PM +0000, vpillai wrote:
> +static struct task_struct *
> +pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
> +{
> +	struct task_struct *next, *max = NULL;
> +	const struct sched_class *class;
> +	const struct cpumask *smt_mask;
> +	int i, j, cpu;
> +	bool need_sync = false;

AFAICT that assignment is superfluous. Also, you violated the inverse
x-mas tree.
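
Something like this is what I mean -- untested, just reordering the
declarations longest-first and dropping the dead initializer:

	struct task_struct *next, *max = NULL;
	const struct sched_class *class;
	const struct cpumask *smt_mask;
	bool need_sync;
	int i, j, cpu;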

> +
> +	cpu = cpu_of(rq);
> +	if (cpu_is_offline(cpu))
> +		return idle_sched_class.pick_next_task(rq);

Are we actually hitting this one?

> +	if (!sched_core_enabled(rq))
> +		return __pick_next_task(rq, prev, rf);
> +
> +	/*
> +	 * If there were no {en,de}queues since we picked (IOW, the task
> +	 * pointers are all still valid), and we haven't scheduled the last
> +	 * pick yet, do so now.
> +	 */
> +	if (rq->core->core_pick_seq == rq->core->core_task_seq &&
> +	    rq->core->core_pick_seq != rq->core_sched_seq) {
> +		WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
> +
> +		next = rq->core_pick;
> +		if (next != prev) {
> +			put_prev_task(rq, prev);
> +			set_next_task(rq, next);
> +		}
> +		return next;
> +	}
> +
> +	prev->sched_class->put_prev_task(rq, prev);
> +	if (!rq->nr_running)
> +		newidle_balance(rq, rf);

This is wrong per commit:

  6e2df0581f56 ("sched: Fix pick_next_task() vs 'change' pattern race")
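
IOW, since that commit the balance pass has to run through the
class->balance() callbacks *before* put_prev_task(), so the rq lock is
never dropped between put_prev_task() and set_next_task(). Roughly
(from memory, not adjusted for this series):

	for_class_range(class, prev->sched_class, &idle_sched_class) {
		if (class->balance(rq, prev, rf))
			break;
	}

	put_prev_task(rq, prev);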

> +	smt_mask = cpu_smt_mask(cpu);
> +
> +	/*
> +	 * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq
> +	 *
> +	 * @task_seq guards the task state ({en,de}queues)
> +	 * @pick_seq is the @task_seq we did a selection on
> +	 * @sched_seq is the @pick_seq we scheduled
> +	 *
> +	 * However, preemptions can cause multiple picks on the same task set.
> +	 * 'Fix' this by also increasing @task_seq for every pick.
> +	 */
> +	rq->core->core_task_seq++;
> +	need_sync = !!rq->core->core_cookie;
> +
> +	/* reset state */
> +	rq->core->core_cookie = 0UL;
> +	for_each_cpu(i, smt_mask) {
> +		struct rq *rq_i = cpu_rq(i);
> +
> +		rq_i->core_pick = NULL;
> +
> +		if (rq_i->core_forceidle) {
> +			need_sync = true;
> +			rq_i->core_forceidle = false;
> +		}
> +
> +		if (i != cpu)
> +			update_rq_clock(rq_i);
> +	}
> +
> +	/*
> +	 * Try and select tasks for each sibling in descending sched_class
> +	 * order.
> +	 */
> +	for_each_class(class) {
> +again:
> +		for_each_cpu_wrap(i, smt_mask, cpu) {
> +			struct rq *rq_i = cpu_rq(i);
> +			struct task_struct *p;
> +
> +			if (cpu_is_offline(i)) {
> +				rq_i->core_pick = rq_i->idle;
> +				continue;
> +			}

Why are we polluting the 'fast' path with offline crud? Why isn't this
the natural result of running pick_task() on an empty runqueue?
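
IOW (my reading, not something this patch spells out): an offline CPU
has an empty runqueue, so every real class comes up empty for it and
the idle class ends up picking the idle task, something like:

	static struct task_struct *pick_task_idle(struct rq *rq)
	{
		return rq->idle;
	}

so the cpu_is_offline() branch in the hot loop buys nothing.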

> +
> +			if (rq_i->core_pick)
> +				continue;
> +
> +			/*
> +			 * If this sibling doesn't yet have a suitable task to
> +			 * run, ask for the most eligible task, given the
> +			 * highest priority task already selected for this
> +			 * core.
> +			 */
> +			p = pick_task(rq_i, class, max);
> +			if (!p) {
> +				/*
> +				 * If there weren't any cookies, we don't need
> +				 * to bother with the other siblings.
> +				 */
> +				if (i == cpu && !need_sync)
> +					goto next_class;
> +
> +				continue;
> +			}
> +
> +			/*
> +			 * Optimize the 'normal' case where there aren't any
> +			 * cookies and we don't need to sync up.
> +			 */
> +			if (i == cpu && !need_sync && !p->core_cookie) {
> +				next = p;
> +				goto done;
> +			}
> +
> +			rq_i->core_pick = p;
> +
> +			/*
> +			 * If this new candidate is of higher priority than the
> +			 * previous, and they're incompatible, we need to wipe
> +			 * the slate and start over. pick_task makes sure that
> +			 * p's priority is more than max if it doesn't match
> +			 * max's cookie.
> +			 *
> +			 * NOTE: this is a linear max-filter and is thus bounded
> +			 * in execution time.
> +			 */
> +			if (!max || !cookie_match(max, p)) {
> +				struct task_struct *old_max = max;
> +
> +				rq->core->core_cookie = p->core_cookie;
> +				max = p;
> +
> +				if (old_max) {
> +					for_each_cpu(j, smt_mask) {
> +						if (j == i)
> +							continue;
> +
> +						cpu_rq(j)->core_pick = NULL;
> +					}
> +					goto again;
> +				} else {
> +					/*
> +					 * Once we select a task for a cpu, we
> +					 * should not be doing an unconstrained
> +					 * pick because it might starve a task
> +					 * on a forced idle cpu.
> +					 */
> +					need_sync = true;
> +				}
> +
> +			}
> +		}
> +next_class:;
> +	}
> +
> +	rq->core->core_pick_seq = rq->core->core_task_seq;
> +	next = rq->core_pick;
> +	rq->core_sched_seq = rq->core->core_pick_seq;
> +
> +	/*
> +	 * Reschedule siblings
> +	 *
> +	 * NOTE: L1TF -- at this point we're no longer running the old task and
> +	 * sending an IPI (below) ensures the sibling will no longer be running
> +	 * their task. This ensures there is no inter-sibling overlap between
> +	 * non-matching user state.
> +	 */
> +	for_each_cpu(i, smt_mask) {
> +		struct rq *rq_i = cpu_rq(i);
> +
> +		if (cpu_is_offline(i))
> +			continue;

Another one; please explain how an offline cpu can be part of the
smt_mask. Last time I checked it got cleared in stop-machine.

> +
> +		WARN_ON_ONCE(!rq_i->core_pick);
> +
> +		if (is_idle_task(rq_i->core_pick) && rq_i->nr_running)
> +			rq_i->core_forceidle = true;
> +
> +		if (i == cpu)
> +			continue;
> +
> +		if (rq_i->curr != rq_i->core_pick)
> +			resched_curr(rq_i);
> +
> +		/* Did we break L1TF mitigation requirements? */
> +		WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick));

That comment is misleading...

> +	}
> +
> +done:
> +	set_next_task(rq, next);
> +	return next;
> +}

----8<----

> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index a9eeef896c78..8432de767730 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -4080,6 +4080,13 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
>  		update_min_vruntime(cfs_rq);
>  }
>  
> +static inline bool
> +__entity_slice_used(struct sched_entity *se)
> +{
> +	return (se->sum_exec_runtime - se->prev_sum_exec_runtime) >
> +		sched_slice(cfs_rq_of(se), se);
> +}
> +
>  /*
>   * Preempt the current task with a newly woken task if needed:
>   */
> @@ -10285,6 +10292,34 @@ static void core_sched_deactivate_fair(struct rq *rq)
>  #endif
>  #endif /* CONFIG_SMP */
>  
> +#ifdef CONFIG_SCHED_CORE
> +/*
> + * If the runqueue has only one task, which has used up its slice, and
> + * the sibling is forced idle, then trigger a reschedule to give the
> + * forced-idle task a chance.
> + */
> +static void resched_forceidle_sibling(struct rq *rq, struct sched_entity *se)
> +{
> +	int cpu = cpu_of(rq), sibling_cpu;
> +	if (rq->cfs.nr_running > 1 || !__entity_slice_used(se))
> +		return;
> +
> +	for_each_cpu(sibling_cpu, cpu_smt_mask(cpu)) {
> +		struct rq *sibling_rq;
> +		if (sibling_cpu == cpu)
> +			continue;
> +		if (cpu_is_offline(sibling_cpu))
> +			continue;
> +
> +		sibling_rq = cpu_rq(sibling_cpu);
> +		if (sibling_rq->core_forceidle) {
> +			resched_curr(sibling_rq);
> +		}
> +	}
> +}
> +#endif
> +
> +
>  /*
>   * scheduler tick hitting a task of our scheduling class.
>   *
> @@ -10308,6 +10343,11 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
>  
>  	update_misfit_status(curr, rq);
>  	update_overutilized_status(task_rq(curr));
> +
> +#ifdef CONFIG_SCHED_CORE
> +	if (sched_core_enabled(rq))
> +		resched_forceidle_sibling(rq, &curr->se);
> +#endif
>  }
>  
>  /*

This ^ seems like it should be in its own patch.

> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 03d502357599..a829e26fa43a 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1003,11 +1003,16 @@ struct rq {
>  #ifdef CONFIG_SCHED_CORE
>  	/* per rq */
>  	struct rq		*core;
> +	struct task_struct	*core_pick;
>  	unsigned int		core_enabled;
> +	unsigned int		core_sched_seq;
>  	struct rb_root		core_tree;
> +	bool			core_forceidle;

Someone forgot that _Bool shouldn't be part of composite types?
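
IOW, something like the neighbouring fields (sketch; whatever width you
prefer, just not _Bool):

	unsigned int		core_forceidle;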

>  	/* shared state */
>  	unsigned int		core_task_seq;
> +	unsigned int		core_pick_seq;
> +	unsigned long		core_cookie;
>  #endif
>  };
