Date:   Thu, 22 Jun 2017 10:38:30 +0200
From:   Ingo Molnar <mingo@...nel.org>
To:     Daniel Bristot de Oliveira <bristot@...hat.com>
Cc:     linux-rt-users <linux-rt-users@...r.kernel.org>,
        "Luis Claudio R . Goncalves" <lgoncalv@...hat.com>,
        Clark Williams <williams@...hat.com>,
        Luiz Capitulino <lcapitulino@...hat.com>,
        Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
        Thomas Gleixner <tglx@...utronix.de>,
        Steven Rostedt <rostedt@...dmis.org>,
        Peter Zijlstra <peterz@...radead.org>,
        LKML <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH 2/2] rt: Increase/decrease the nr of migratory tasks when
 enabling/disabling migration


* Daniel Bristot de Oliveira <bristot@...hat.com> wrote:

>  void migrate_disable(void)
>  {
>  	struct task_struct *p = current;
> +	struct rq *rq;
> +	struct rq_flags rf;
> +
>  
>  	if (in_atomic() || irqs_disabled()) {
>  #ifdef CONFIG_SCHED_DEBUG
> @@ -7593,10 +7596,21 @@ void migrate_disable(void)
>  	preempt_disable();
>  	preempt_lazy_disable();
>  	pin_current_cpu();
> -	p->migrate_disable = 1;
>  
> -	p->cpus_ptr = cpumask_of(smp_processor_id());
> +	rq = task_rq_lock(p, &rf);
> +	if (unlikely((p->sched_class == &rt_sched_class ||
> +		      p->sched_class == &dl_sched_class) &&
> +		      p->nr_cpus_allowed > 1)) {
> +		if (p->sched_class == &rt_sched_class)
> +			task_rq(p)->rt.rt_nr_migratory--;
> +		else
> +			task_rq(p)->dl.dl_nr_migratory--;
> +	}
>  	p->nr_cpus_allowed = 1;
> +	task_rq_unlock(rq, p, &rf);
> +	p->cpus_ptr = cpumask_of(smp_processor_id());
> +	p->migrate_disable = 1;
> +
>  
>  	preempt_enable();
>  }
> @@ -7605,6 +7619,9 @@ EXPORT_SYMBOL(migrate_disable);
>  void migrate_enable(void)
>  {
>  	struct task_struct *p = current;
> +	struct rq *rq;
> +	struct rq_flags rf;
> +
>  
>  	if (in_atomic() || irqs_disabled()) {
>  #ifdef CONFIG_SCHED_DEBUG
> @@ -7628,17 +7645,24 @@ void migrate_enable(void)
>  
>  	preempt_disable();
>  
> -	p->cpus_ptr = &p->cpus_mask;
> -	p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask);
>  	p->migrate_disable = 0;
> +	p->cpus_ptr = &p->cpus_mask;
>  
> -	if (p->migrate_disable_update) {
> -		struct rq *rq;
> -		struct rq_flags rf;
> +	rq = task_rq_lock(p, &rf);
> +	p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask);
> +	if (unlikely((p->sched_class == &rt_sched_class ||
> +		      p->sched_class == &dl_sched_class) &&
> +		      p->nr_cpus_allowed > 1)) {
> +		if (p->sched_class == &rt_sched_class)
> +			task_rq(p)->rt.rt_nr_migratory++;
> +		else
> +			task_rq(p)->dl.dl_nr_migratory++;
> +	}
> +	task_rq_unlock(rq, p, &rf);

The fix looks good to me, but AFAICS the repeated pattern introduced here could be 
factored out into a helper function instead, right?
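
Something along these lines, perhaps (only a sketch; the helper name
update_nr_migratory() and its 'delta' parameter are illustrative, reusing the
checks and the rt/dl fields from the patch above, and it would be called with
the task's rq lock held, exactly where the duplicated blocks sit today):

	/*
	 * Sketch of a helper for the duplicated rt/dl migratory accounting;
	 * name and signature are illustrative, not from the posted patch.
	 * Caller must hold task_rq_lock(p).
	 */
	static void update_nr_migratory(struct task_struct *p, long delta)
	{
		if (unlikely((p->sched_class == &rt_sched_class ||
			      p->sched_class == &dl_sched_class) &&
			      p->nr_cpus_allowed > 1)) {
			if (p->sched_class == &rt_sched_class)
				task_rq(p)->rt.rt_nr_migratory += delta;
			else
				task_rq(p)->dl.dl_nr_migratory += delta;
		}
	}

	/* migrate_disable(), under the rq lock: */
	update_nr_migratory(p, -1);

	/* migrate_enable(), under the rq lock: */
	update_nr_migratory(p, 1);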

Thanks,

	Ingo
