Message-ID: <20250521092155.GA24746@bytedance>
Date: Wed, 21 May 2025 17:21:55 +0800
From: Aaron Lu <ziqianlu@...edance.com>
To: Chengming Zhou <chengming.zhou@...ux.dev>
Cc: Valentin Schneider <vschneid@...hat.com>,
	Ben Segall <bsegall@...gle.com>,
	K Prateek Nayak <kprateek.nayak@....com>,
	Peter Zijlstra <peterz@...radead.org>,
	Josh Don <joshdon@...gle.com>, Ingo Molnar <mingo@...hat.com>,
	Vincent Guittot <vincent.guittot@...aro.org>,
	Xi Wang <xii@...gle.com>, linux-kernel@...r.kernel.org,
	Juri Lelli <juri.lelli@...hat.com>,
	Dietmar Eggemann <dietmar.eggemann@....com>,
	Steven Rostedt <rostedt@...dmis.org>, Mel Gorman <mgorman@...e.de>,
	Chuyi Zhou <zhouchuyi@...edance.com>,
	Jan Kiszka <jan.kiszka@...mens.com>,
	Florian Bezdeka <florian.bezdeka@...mens.com>
Subject: Re: [External] Re: [PATCH 2/7] sched/fair: prepare throttle path for
 task based throttle

On Wed, May 21, 2025 at 05:01:58PM +0800, Chengming Zhou wrote:
> On 2025/5/20 18:41, Aaron Lu wrote:
> > From: Valentin Schneider <vschneid@...hat.com>
> > 
> > In the current throttle model, when a cfs_rq is throttled, its entity is
> > dequeued from the cpu's rq, making tasks attached to it unable to run,
> > thus achieving the throttle target.
> > 
> > This has a drawback though: assume a task is a reader of percpu_rwsem
> > and is waiting. When it gets woken up, it cannot run until its task
> > group's next period comes, which can be a relatively long time. The
> > waiting writer then has to wait even longer, and further readers build
> > up behind it, eventually triggering a task hung.
> > 
> > To improve this situation, change the throttle model to task based, i.e.
> > when a cfs_rq is throttled, record its throttled status but do not remove
> > it from the cpu's rq. Instead, when tasks that belong to this cfs_rq get
> > picked, add a task work to them so that they are dequeued when they
> > return to user space. This way, throttled tasks will not hold any kernel
> > resources.
> > 
> > To avoid breaking bisect, preserve the current throttle behavior by
> > still dequeuing the throttled hierarchy from the rq; because of this, no
> > task can have the throttle task work added yet. The throttle model will
> > switch to task based in a later patch.
> > 
> > Suggested-by: Chengming Zhou <chengming.zhou@...ux.dev> # tag on pick
> > Signed-off-by: Valentin Schneider <vschneid@...hat.com>
> > Signed-off-by: Aaron Lu <ziqianlu@...edance.com>
> 
> I'm wondering how about putting patches 02-04 together, since it's strange
> to set up the task work in this patch without changing throttle_cfs_rq(),

Do you mean 02-05?
Because the actual change to throttle_cfs_rq() happens in patch 5 :)

> which makes the reviewing process a bit confusing? WDYT?

Yes, I agree it looks a bit confusing.

The point is to not break bisect while still making review easier; if all
the task based throttle related patches were merged together, that would
mean squashing patches 02-05 into one, which seems too big?

Thanks,
Aaron

> > ---
> >   kernel/sched/fair.c  | 88 +++++++++++++++++++++++++++++++++++++++-----
> >   kernel/sched/sched.h |  1 +
> >   2 files changed, 80 insertions(+), 9 deletions(-)
> > 
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index 75bf6186a5137..e87ceb0a2d37f 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -5825,8 +5825,47 @@ static inline int throttled_lb_pair(struct task_group *tg,
> >   	       throttled_hierarchy(dest_cfs_rq);
> >   }
> > +static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags);
> >   static void throttle_cfs_rq_work(struct callback_head *work)
> >   {
> > +	struct task_struct *p = container_of(work, struct task_struct, sched_throttle_work);
> > +	struct sched_entity *se;
> > +	struct cfs_rq *cfs_rq;
> > +	struct rq *rq;
> > +
> > +	WARN_ON_ONCE(p != current);
> > +	p->sched_throttle_work.next = &p->sched_throttle_work;
> > +
> > +	/*
> > +	 * If task is exiting, then there won't be a return to userspace, so we
> > +	 * don't have to bother with any of this.
> > +	 */
> > +	if ((p->flags & PF_EXITING))
> > +		return;
> > +
> > +	scoped_guard(task_rq_lock, p) {
> > +		se = &p->se;
> > +		cfs_rq = cfs_rq_of(se);
> > +
> > +		/* Raced, forget */
> > +		if (p->sched_class != &fair_sched_class)
> > +			return;
> > +
> > +		/*
> > +		 * If not in limbo, then either replenish has happened or this
> > +		 * task got migrated out of the throttled cfs_rq, move along.
> > +		 */
> > +		if (!cfs_rq->throttle_count)
> > +			return;
> > +		rq = scope.rq;
> > +		update_rq_clock(rq);
> > +		WARN_ON_ONCE(!list_empty(&p->throttle_node));
> > +		dequeue_task_fair(rq, p, DEQUEUE_SLEEP | DEQUEUE_SPECIAL);
> > +		list_add(&p->throttle_node, &cfs_rq->throttled_limbo_list);
> > +		resched_curr(rq);
> > +	}
> > +
> > +	cond_resched_tasks_rcu_qs();
> >   }
> >   void init_cfs_throttle_work(struct task_struct *p)
> > @@ -5866,21 +5905,42 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
> >   	return 0;
> >   }
> > +static inline bool task_has_throttle_work(struct task_struct *p)
> > +{
> > +	return p->sched_throttle_work.next != &p->sched_throttle_work;
> > +}
> > +
> > +static inline void task_throttle_setup_work(struct task_struct *p)
> > +{
> > +	if (task_has_throttle_work(p))
> > +		return;
> > +
> > +	/*
> > +	 * Kthreads and exiting tasks don't return to userspace, so adding the
> > +	 * work is pointless
> > +	 */
> > +	if ((p->flags & (PF_EXITING | PF_KTHREAD)))
> > +		return;
> > +
> > +	task_work_add(p, &p->sched_throttle_work, TWA_RESUME);
> > +}
> > +
> >   static int tg_throttle_down(struct task_group *tg, void *data)
> >   {
> >   	struct rq *rq = data;
> >   	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
> > +	cfs_rq->throttle_count++;
> > +	if (cfs_rq->throttle_count > 1)
> > +		return 0;
> > +
> >   	/* group is entering throttled state, stop time */
> > -	if (!cfs_rq->throttle_count) {
> > -		cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
> > -		list_del_leaf_cfs_rq(cfs_rq);
> > +	cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
> > +	list_del_leaf_cfs_rq(cfs_rq);
> > -		WARN_ON_ONCE(cfs_rq->throttled_clock_self);
> > -		if (cfs_rq->nr_queued)
> > -			cfs_rq->throttled_clock_self = rq_clock(rq);
> > -	}
> > -	cfs_rq->throttle_count++;
> > +	WARN_ON_ONCE(cfs_rq->throttled_clock_self);
> > +	if (cfs_rq->nr_queued)
> > +		cfs_rq->throttled_clock_self = rq_clock(rq);
> >   	return 0;
> >   }
> > @@ -6575,6 +6635,7 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
> >   	cfs_rq->runtime_enabled = 0;
> >   	INIT_LIST_HEAD(&cfs_rq->throttled_list);
> >   	INIT_LIST_HEAD(&cfs_rq->throttled_csd_list);
> > +	INIT_LIST_HEAD(&cfs_rq->throttled_limbo_list);
> >   }
> >   void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
> > @@ -6744,6 +6805,7 @@ static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
> >   static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
> >   static inline void sync_throttle(struct task_group *tg, int cpu) {}
> >   static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
> > +static void task_throttle_setup_work(struct task_struct *p) {}
> >   static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
> >   {
> > @@ -8851,6 +8913,7 @@ static struct task_struct *pick_task_fair(struct rq *rq)
> >   {
> >   	struct sched_entity *se;
> >   	struct cfs_rq *cfs_rq;
> > +	struct task_struct *p;
> >   again:
> >   	cfs_rq = &rq->cfs;
> > @@ -8871,7 +8934,14 @@ static struct task_struct *pick_task_fair(struct rq *rq)
> >   		cfs_rq = group_cfs_rq(se);
> >   	} while (cfs_rq);
> > -	return task_of(se);
> > +	p = task_of(se);
> > +	if (throttled_hierarchy(cfs_rq_of(se))) {
> > +		/* Should not happen for now */
> > +		WARN_ON_ONCE(1);
> > +		task_throttle_setup_work(p);
> > +	}
> > +
> > +	return p;
> >   }
> >   static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first);
> > diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> > index 921527327f107..83f16fc44884f 100644
> > --- a/kernel/sched/sched.h
> > +++ b/kernel/sched/sched.h
> > @@ -736,6 +736,7 @@ struct cfs_rq {
> >   	int			throttle_count;
> >   	struct list_head	throttled_list;
> >   	struct list_head	throttled_csd_list;
> > +	struct list_head        throttled_limbo_list;
> >   #endif /* CONFIG_CFS_BANDWIDTH */
> >   #endif /* CONFIG_FAIR_GROUP_SCHED */
> >   };
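
A minimal user-space sketch of the deferral pattern the commit message
describes may help: arm some work when a task in a throttled cfs_rq gets
picked, and only park the task once it reaches the return-to-user boundary.
The names below are illustrative stand-ins, not the kernel's task_work or
cfs_rq APIs from the patch.

/*
 * User-space analogy of the task-based throttle flow; illustrative only.
 * "work_pending" stands in for the queued sched_throttle_work, and
 * "in_limbo" for sitting on throttled_limbo_list.
 */
#include <stdbool.h>
#include <stdio.h>

struct task {
	const char *name;
	bool work_pending;	/* deferred dequeue work has been armed */
	bool in_limbo;		/* parked until the next replenish */
};

/* "pick" time: the group is throttled, so arm deferred work on the task. */
static void setup_throttle_work(struct task *t)
{
	if (t->work_pending)	/* already armed, nothing to do */
		return;
	t->work_pending = true;
}

/* "return to user" time: the deferred work actually parks the task. */
static void run_pending_work(struct task *t)
{
	if (!t->work_pending)
		return;
	t->work_pending = false;
	t->in_limbo = true;	/* task now holds no "kernel" resources */
	printf("%s parked on limbo list until replenish\n", t->name);
}

int main(void)
{
	struct task t = { .name = "worker" };

	setup_throttle_work(&t);	/* done in pick_task_fair() in the patch */
	run_pending_work(&t);		/* done via task work at exit to user */
	return t.in_limbo ? 0 : 1;
}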
