Date:	Mon, 11 Apr 2011 14:36:37 GMT
From:	tip-bot for Peter Zijlstra <a.p.zijlstra@...llo.nl>
To:	linux-tip-commits@...r.kernel.org
Cc:	linux-kernel@...r.kernel.org, hpa@...or.com, mingo@...hat.com,
	torvalds@...ux-foundation.org, a.p.zijlstra@...llo.nl,
	efault@....de, npiggin@...nel.dk, akpm@...ux-foundation.org,
	tglx@...utronix.de, mingo@...e.hu
Subject: [tip:sched/domains] sched: Simplify finding the lowest sched_domain

Commit-ID:  21d42ccfd6c6c11f96c2acfd32a85cfc33514d3a
Gitweb:     http://git.kernel.org/tip/21d42ccfd6c6c11f96c2acfd32a85cfc33514d3a
Author:     Peter Zijlstra <a.p.zijlstra@...llo.nl>
AuthorDate: Thu, 7 Apr 2011 14:09:48 +0200
Committer:  Ingo Molnar <mingo@...e.hu>
CommitDate: Mon, 11 Apr 2011 12:58:19 +0200

sched: Simplify finding the lowest sched_domain

Instead of relying on knowing the build order and the various CONFIG_
flags, simply remember the bottom-most sched_domain when we create the
domain hierarchy.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@...llo.nl>
Cc: Mike Galbraith <efault@....de>
Cc: Nick Piggin <npiggin@...nel.dk>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122942.134511046@chello.nl
Signed-off-by: Ingo Molnar <mingo@...e.hu>
---
 kernel/sched.c |   23 +++++++++++++----------
 1 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index e66d24a..d6992bf 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6865,11 +6865,13 @@ struct s_data {
 	cpumask_var_t		nodemask;
 	cpumask_var_t		send_covered;
 	cpumask_var_t		tmpmask;
+	struct sched_domain ** __percpu sd;
 	struct root_domain	*rd;
 };
 
 enum s_alloc {
 	sa_rootdomain,
+	sa_sd,
 	sa_tmpmask,
 	sa_send_covered,
 	sa_nodemask,
@@ -7104,6 +7106,8 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 	switch (what) {
 	case sa_rootdomain:
 		free_rootdomain(d->rd); /* fall through */
+	case sa_sd:
+		free_percpu(d->sd); /* fall through */
 	case sa_tmpmask:
 		free_cpumask_var(d->tmpmask); /* fall through */
 	case sa_send_covered:
@@ -7124,10 +7128,15 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 		return sa_nodemask;
 	if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
 		return sa_send_covered;
+	d->sd = alloc_percpu(struct sched_domain *);
+	if (!d->sd) {
+		printk(KERN_WARNING "Cannot alloc per-cpu pointers\n");
+		return sa_tmpmask;
+	}
 	d->rd = alloc_rootdomain();
 	if (!d->rd) {
 		printk(KERN_WARNING "Cannot alloc root domain\n");
-		return sa_tmpmask;
+		return sa_sd;
 	}
 	return sa_rootdomain;
 }
@@ -7316,6 +7325,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
 		sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
 
+		*per_cpu_ptr(d.sd, i) = sd;
+
 		for (tmp = sd; tmp; tmp = tmp->parent) {
 			tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
 			build_sched_groups(&d, tmp, cpu_map, i);
@@ -7363,15 +7374,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 
 	/* Attach the domains */
 	for_each_cpu(i, cpu_map) {
-#ifdef CONFIG_SCHED_SMT
-		sd = &per_cpu(cpu_domains, i).sd;
-#elif defined(CONFIG_SCHED_MC)
-		sd = &per_cpu(core_domains, i).sd;
-#elif defined(CONFIG_SCHED_BOOK)
-		sd = &per_cpu(book_domains, i).sd;
-#else
-		sd = &per_cpu(phys_domains, i).sd;
-#endif
+		sd = *per_cpu_ptr(d.sd, i);
 		cpu_attach_domain(sd, d.rd, i);
 	}
 
--
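For readers less familiar with the scheduler domain setup code, here is a
minimal userspace sketch of the idea behind the patch: while the domain
hierarchy is built top-down per CPU, remember the bottom-most level in a
pointer array (standing in for the alloc_percpu()/per_cpu_ptr() machinery),
so the attach loop can read it back without the CONFIG_-dependent #ifdef
chain. This is only an illustration; every identifier in it (struct domain,
build_level, lowest_domain, NR_CPUS) is invented for the example and does
not come from the patch.

/*
 * Illustrative userspace sketch, not kernel code.  Models the pattern the
 * patch introduces: record the lowest domain level per CPU while building
 * the hierarchy, instead of re-deriving it at attach time.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct domain {
	const char *name;
	struct domain *parent;	/* next (wider) level, NULL at the top */
};

/* stand-in for the per-cpu 'd.sd' pointer array added by the patch */
static struct domain *lowest_domain[NR_CPUS];

/* add one more (lower) level below 'parent' */
static struct domain *build_level(struct domain *parent, const char *name)
{
	struct domain *d = calloc(1, sizeof(*d));

	d->name = name;
	d->parent = parent;
	return d;
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		struct domain *d = NULL;

		/* levels are built from the widest down to the narrowest */
		d = build_level(d, "NODE");
		d = build_level(d, "PHYS");
		d = build_level(d, "MC");
		d = build_level(d, "SMT");

		/* remember the bottom-most level as building finishes */
		lowest_domain[cpu] = d;
	}

	/* "attach": no #ifdef needed to find the lowest level */
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		struct domain *sd = lowest_domain[cpu];

		printf("cpu%d attaches at level %s\n", cpu, sd->name);
	}
	/* the sketch leaks the hierarchy at exit; fine for a demo */
	return 0;
}

Note also that the new allocation is simply folded into the existing staged
error unwinding: the sa_sd enum value and the extra fall-through case in
__free_domain_allocs() free the per-cpu pointer array if a later allocation
fails.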