Date:   Tue, 14 Apr 2020 16:32:52 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     vpillai <vpillai@...italocean.com>
Cc:     Nishanth Aravamudan <naravamudan@...italocean.com>,
        Julien Desfossez <jdesfossez@...italocean.com>,
        Tim Chen <tim.c.chen@...ux.intel.com>, mingo@...nel.org,
        tglx@...utronix.de, pjt@...gle.com, torvalds@...ux-foundation.org,
        linux-kernel@...r.kernel.org, fweisbec@...il.com,
        keescook@...omium.org, kerrnel@...gle.com,
        Phil Auld <pauld@...hat.com>, Aaron Lu <aaron.lwe@...il.com>,
        Aubrey Li <aubrey.intel@...il.com>, aubrey.li@...ux.intel.com,
        Valentin Schneider <valentin.schneider@....com>,
        Mel Gorman <mgorman@...hsingularity.net>,
        Pawan Gupta <pawan.kumar.gupta@...ux.intel.com>,
        Paolo Bonzini <pbonzini@...hat.com>,
        Joel Fernandes <joelaf@...gle.com>, joel@...lfernandes.org
Subject: Re: [RFC PATCH 03/13] sched: Core-wide rq->lock

On Wed, Mar 04, 2020 at 04:59:53PM +0000, vpillai wrote:
> +DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
> +
> +/*
> + * The static-key + stop-machine variable are needed such that:
> + *
> + *	spin_lock(rq_lockp(rq));
> + *	...
> + *	spin_unlock(rq_lockp(rq));
> + *
> + * ends up locking and unlocking the _same_ lock, and all CPUs
> + * always agree on what rq has what lock.
> + *
> + * XXX entirely possible to selectively enable cores, don't bother for now.
> + */
> +static int __sched_core_stopper(void *data)
> +{
> +	bool enabled = !!(unsigned long)data;
> +	int cpu;
> +
> +	for_each_online_cpu(cpu)
> +		cpu_rq(cpu)->core_enabled = enabled;
> +
> +	return 0;
> +}
> +
> +static DEFINE_MUTEX(sched_core_mutex);
> +static int sched_core_count;
> +
> +static void __sched_core_enable(void)
> +{
> +	// XXX verify there are no cookie tasks (yet)
> +
> +	static_branch_enable(&__sched_core_enabled);
> +	stop_machine(__sched_core_stopper, (void *)true, NULL);
> +}
> +
> +static void __sched_core_disable(void)
> +{
> +	// XXX verify there are no cookie tasks (left)
> +
> +	stop_machine(__sched_core_stopper, (void *)false, NULL);
> +	static_branch_disable(&__sched_core_enabled);
> +}

> +static inline raw_spinlock_t *rq_lockp(struct rq *rq)
> +{
> +	if (sched_core_enabled(rq))
> +		return &rq->core->__lock;
> +
> +	return &rq->__lock;
> +}

While reading all this again, I realized it's not too hard to get rid of
stop-machine here: it suffices for the lock function to re-check, after
taking the lock, that rq_lockp() still returns the lock it took, and to
retry when the mode flipped underneath it.

void __raw_rq_lock(struct rq *rq)
{
	raw_spinlock_t *lock;

	for (;;) {
		lock = rq_lockp(rq);

		raw_spin_lock(lock);
		if (lock == rq_lockp(rq))
			return;
		/*
		 * rq_lockp() changed between the read above and taking
		 * the lock; we took the wrong lock, drop it and retry.
		 */
		raw_spin_unlock(lock);
	}
}
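
(Untested sketch, not part of the patch: the unlock side can then
simply re-derive the lock, since rq_lockp() is stable while we hold
it -- flipping core_enabled below takes every sibling's rq->__lock,
and rq->core's __lock is one of those.)

void __raw_rq_unlock(struct rq *rq)
{
	raw_spin_unlock(rq_lockp(rq));
}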

void __sched_core_enable(int core, bool enable)
{
	const struct cpumask *smt_mask;
	int cpu, i = 0;

	smt_mask = cpu_smt_mask(core);

	/*
	 * Taking every sibling's rq->__lock (rq->core's __lock among
	 * them) serializes against anybody inside __raw_rq_lock().
	 */
	for_each_cpu(cpu, smt_mask)
		raw_spin_lock_nested(&cpu_rq(cpu)->__lock, i++);

	for_each_cpu(cpu, smt_mask)
		cpu_rq(cpu)->core_enabled = enable;

	for_each_cpu(cpu, smt_mask)
		raw_spin_unlock(&cpu_rq(cpu)->__lock);
}
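
(And selectively enabling cores then becomes trivial; another sketch,
sched_core_flip() is a made-up name and this still wants the usual
hotplug serialization, but flipping all online cores is just:)

static void sched_core_flip(bool enable)
{
	int cpu;

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		/* flip each core exactly once, via its first sibling */
		if (cpu == cpumask_first(cpu_smt_mask(cpu)))
			__sched_core_enable(cpu, enable);
	}
	cpus_read_unlock();
}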

