Message-ID: <75730c46f226722b71dd1ec9634a1eb90017f116.camel@linux.intel.com>
Date:   Thu, 16 Mar 2023 15:49:31 -0700
From:   Tim Chen <tim.c.chen@...ux.intel.com>
To:     Peter Zijlstra <peterz@...radead.org>, mingo@...nel.org,
        vincent.guittot@...aro.org
Cc:     linux-kernel@...r.kernel.org, juri.lelli@...hat.com,
        dietmar.eggemann@....com, rostedt@...dmis.org, bsegall@...gle.com,
        mgorman@...e.de, bristot@...hat.com, corbet@....net,
        qyousef@...alina.io, chris.hyser@...cle.com,
        patrick.bellasi@...bug.net, pjt@...gle.com, pavel@....cz,
        qperret@...gle.com, joshdon@...gle.com, timj@....org,
        kprateek.nayak@....com, yu.c.chen@...el.com,
        youssefesmat@...omium.org, joel@...lfernandes.org
Subject: Re: [PATCH 08/10] sched/fair: Add lag based placement

On Mon, 2023-03-06 at 14:25 +0100, Peter Zijlstra wrote:
> With the introduction of avg_vruntime, it is possible to approximate
> lag (the entire purpose of introducing it in fact). Use this to do lag
> based placement over sleep+wake.
> 
> Specifically, the FAIR_SLEEPERS thing places things too far to the
> left and messes up the deadline aspect of EEVDF.
> 
> Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
> ---
>  include/linux/sched.h   |    1 
>  kernel/sched/core.c     |    1 
>  kernel/sched/fair.c     |   63 +++++++++++++++++++++++++++---------------------
>  kernel/sched/features.h |    8 ++++++
>  4 files changed, 46 insertions(+), 27 deletions(-)
> 
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -555,6 +555,7 @@ struct sched_entity {
>  	u64				sum_exec_runtime;
>  	u64				vruntime;
>  	u64				prev_sum_exec_runtime;
> +	s64				lag;
>  
>  	u64				nr_migrations;
>  
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -4436,6 +4436,7 @@ static void __sched_fork(unsigned long c
>  	p->se.prev_sum_exec_runtime	= 0;
>  	p->se.nr_migrations		= 0;
>  	p->se.vruntime			= 0;
> +	p->se.lag			= 0;
>  	INIT_LIST_HEAD(&p->se.group_node);
>  
>  	set_latency_offset(p);
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -4749,39 +4749,45 @@ static void
>  place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
>  {
>  	u64 vruntime = avg_vruntime(cfs_rq);
> -	u64 sleep_time;
>  
> -	/* sleeps up to a single latency don't count. */
> -	if (!initial) {
> -		unsigned long thresh;
> -
> -		if (se_is_idle(se))
> -			thresh = sysctl_sched_min_granularity;
> -		else
> -			thresh = sysctl_sched_latency;
> +	if (sched_feat(FAIR_SLEEPERS)) {
> +		u64 sleep_time;
> +
> +		/* sleeps up to a single latency don't count. */
> +		if (!initial) {
> +			unsigned long thresh;
> +
> +			if (se_is_idle(se))
> +				thresh = sysctl_sched_min_granularity;
> +			else
> +				thresh = sysctl_sched_latency;
> +
> +			/*
> +			 * Halve their sleep time's effect, to allow
> +			 * for a gentler effect of sleepers:
> +			 */
> +			if (sched_feat(GENTLE_FAIR_SLEEPERS))
> +				thresh >>= 1;
> +
> +			vruntime -= thresh;
> +		}
>  
>  		/*
> -		 * Halve their sleep time's effect, to allow
> -		 * for a gentler effect of sleepers:
> +		 * Pull vruntime of the entity being placed to the base level of
> +		 * cfs_rq, to prevent boosting it if placed backwards.  If the entity
> +		 * slept for a long time, don't even try to compare its vruntime with
> +		 * the base as it may be too far off and the comparison may get
> +		 * inversed due to s64 overflow.
>  		 */
> -		if (sched_feat(GENTLE_FAIR_SLEEPERS))
> -			thresh >>= 1;
> -
> -		vruntime -= thresh;
> +		sleep_time = rq_clock_task(rq_of(cfs_rq)) - se->exec_start;
> +		if ((s64)sleep_time < 60LL * NSEC_PER_SEC)
> +			vruntime = max_vruntime(se->vruntime, vruntime);
>  	}
>  
> -	/*
> -	 * Pull vruntime of the entity being placed to the base level of
> -	 * cfs_rq, to prevent boosting it if placed backwards.  If the entity
> -	 * slept for a long time, don't even try to compare its vruntime with
> -	 * the base as it may be too far off and the comparison may get
> -	 * inversed due to s64 overflow.
> -	 */
> -	sleep_time = rq_clock_task(rq_of(cfs_rq)) - se->exec_start;
> -	if ((s64)sleep_time > 60LL * NSEC_PER_SEC)
> -		se->vruntime = vruntime;
> -	else
> -		se->vruntime = max_vruntime(se->vruntime, vruntime);
> +	if (sched_feat(PRESERVE_LAG))
> +		vruntime -= se->lag;
> +
> +	se->vruntime = vruntime;

I was going to say that when we migrate a task to a new runqueue,
we subtract the old queue's min_vruntime from its vruntime, and
that math needs updating.

But then I saw you did that in 
https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git/commit/kernel/sched?h=sched/eevdf
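
For context, the math I meant is the classic re-basing of vruntime
against each queue's min_vruntime when a task migrates. A toy model
of just that arithmetic (illustrative only, not the actual queue.git
code; all values are made up):

#include <stdio.h>

typedef unsigned long long u64;

int main(void)
{
	u64 se_vruntime = 1000300;	/* task's vruntime on the old rq */
	u64 old_min_vrt = 1000000;	/* old rq's min_vruntime         */
	u64 new_min_vrt = 2000000;	/* new rq's min_vruntime         */

	se_vruntime -= old_min_vrt;	/* make it relative: 300         */
	se_vruntime += new_min_vrt;	/* re-base on the new rq         */

	/* still 300 ahead of the local baseline: 2000300 */
	printf("vruntime after migration: %llu\n", se_vruntime);
	return 0;
}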

With this new lag-based placement, I think it should properly fix
the starvation issues we have seen caused by tasks that rapidly hop
between CPUs.
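
To make that concrete, here is a toy model of the PRESERVE_LAG round
trip (the numbers and avg_vruntime values are made up; only the
arithmetic mirrors the patch):

#include <stdio.h>

typedef long long s64;
typedef unsigned long long u64;

struct toy_se {
	u64 vruntime;
	s64 lag;
};

/* dequeue_entity() with DEQUEUE_SLEEP: remember position vs. average */
static void toy_dequeue(struct toy_se *se, u64 avg_vruntime)
{
	se->lag = (s64)(avg_vruntime - se->vruntime);
}

/* place_entity(): re-insert at the same relative position */
static void toy_place(struct toy_se *se, u64 avg_vruntime)
{
	se->vruntime = avg_vruntime - se->lag;
}

int main(void)
{
	struct toy_se se = { .vruntime = 90 };

	toy_dequeue(&se, 100);	/* went to sleep 10 behind the average */
	toy_place(&se, 500);	/* the queue advanced while it slept   */

	/* it wakes up 10 behind the new average: 490 */
	printf("placed at %llu (lag %lld)\n", se.vruntime, se.lag);
	return 0;
}

A task that hops CPUs quickly no longer loses its service deficit at
each placement; the lag travels with the entity instead of being
clamped by the old sleeper credit.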

Tim

>  }
>  
>  static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
> @@ -4949,6 +4955,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
>  
>  	clear_buddies(cfs_rq, se);
>  
> +	if (sched_feat(PRESERVE_LAG) && (flags & DEQUEUE_SLEEP))
> +		se->lag = avg_vruntime(cfs_rq) - se->vruntime;
> +
>  	if (se != cfs_rq->curr)
>  		__dequeue_entity(cfs_rq, se);
>  	se->on_rq = 0;
> --- a/kernel/sched/features.h
> +++ b/kernel/sched/features.h
> @@ -1,12 +1,20 @@
>  /* SPDX-License-Identifier: GPL-2.0 */
> +
>  /*
>   * Only give sleepers 50% of their service deficit. This allows
>   * them to run sooner, but does not allow tons of sleepers to
>   * rip the spread apart.
>   */
> +SCHED_FEAT(FAIR_SLEEPERS, false)
>  SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
>  
>  /*
> + * Using the avg_vruntime, do the right thing and preserve lag
> + * across sleep+wake cycles.
> + */
> +SCHED_FEAT(PRESERVE_LAG, true)
> +
> +/*
>   * Prefer to schedule the task we woke last (assuming it failed
>   * wakeup-preemption), since its likely going to consume data we
>   * touched, increases cache locality.
> 
> 
