Message-ID: <tip-bf28b253266ebd73c331dde24d64606afde32ceb@git.kernel.org>
Date:	Mon, 11 Apr 2011 14:39:30 GMT
From:	tip-bot for Peter Zijlstra <a.p.zijlstra@...llo.nl>
To:	linux-tip-commits@...r.kernel.org
Cc:	linux-kernel@...r.kernel.org, hpa@...or.com, mingo@...hat.com,
	torvalds@...ux-foundation.org, a.p.zijlstra@...llo.nl,
	efault@....de, npiggin@...nel.dk, akpm@...ux-foundation.org,
	tglx@...utronix.de, mingo@...e.hu
Subject: [tip:sched/domains] sched: Remove nodemask allocation

Commit-ID:  bf28b253266ebd73c331dde24d64606afde32ceb
Gitweb:     http://git.kernel.org/tip/bf28b253266ebd73c331dde24d64606afde32ceb
Author:     Peter Zijlstra <a.p.zijlstra@...llo.nl>
AuthorDate: Thu, 7 Apr 2011 14:09:55 +0200
Committer:  Ingo Molnar <mingo@...e.hu>
CommitDate: Mon, 11 Apr 2011 12:58:22 +0200

sched: Remove nodemask allocation

There is only one nodemask user left, so remove it and use a direct
computation instead; this saves some memory and reduces code-flow
complexity.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@...llo.nl>
Cc: Mike Galbraith <efault@....de>
Cc: Nick Piggin <npiggin@...nel.dk>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Link: http://lkml.kernel.org/r/20110407122942.505608966@chello.nl
Signed-off-by: Ingo Molnar <mingo@...e.hu>
---
 kernel/sched.c |   14 +++-----------
 1 files changed, 3 insertions(+), 11 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index d395fe5..f4d3a62 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6838,7 +6838,6 @@ struct sd_data {
 };
 
 struct s_data {
-	cpumask_var_t		nodemask;
 	cpumask_var_t		send_covered;
 	struct sched_domain ** __percpu sd;
 	struct sd_data 		sdd[SD_LV_MAX];
@@ -6850,7 +6849,6 @@ enum s_alloc {
 	sa_sd,
 	sa_sd_storage,
 	sa_send_covered,
-	sa_nodemask,
 	sa_none,
 };
 
@@ -7035,8 +7033,6 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 		} /* fall through */
 	case sa_send_covered:
 		free_cpumask_var(d->send_covered); /* fall through */
-	case sa_nodemask:
-		free_cpumask_var(d->nodemask); /* fall through */
 	case sa_none:
 		break;
 	}
@@ -7049,10 +7045,8 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 
 	memset(d, 0, sizeof(*d));
 
-	if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
-		return sa_none;
 	if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
-		return sa_nodemask;
+		return sa_none;
 	for (i = 0; i < SD_LV_MAX; i++) {
 		d->sdd[i].sd = alloc_percpu(struct sched_domain *);
 		if (!d->sdd[i].sd)
@@ -7149,7 +7143,8 @@ static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
 	struct sched_domain *sd;
 	sd = sd_init_CPU(d, i);
 	set_domain_attribute(sd, attr);
-	cpumask_copy(sched_domain_span(sd), d->nodemask);
+	cpumask_and(sched_domain_span(sd),
+			cpumask_of_node(cpu_to_node(i)), cpu_map);
 	sd->parent = parent;
 	if (parent)
 		parent->child = sd;
@@ -7219,9 +7214,6 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 
 	/* Set up domains for cpus specified by the cpu_map. */
 	for_each_cpu(i, cpu_map) {
-		cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
-			    cpu_map);
-
 		sd = NULL;
 		sd = __build_allnodes_sched_domain(&d, cpu_map, attr, sd, i);
 		sd = __build_node_sched_domain(&d, cpu_map, attr, sd, i);
--
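
For context, a minimal user-space C sketch (not part of the commit; the
types and helpers below are hypothetical stand-ins for the kernel's
cpumask API) of the pattern the changelog describes: dropping the cached
intermediate nodemask and computing the node/cpu_map intersection
directly at its single point of use.

	/*
	 * Stand-alone illustration only. mask_t, cpus_of_node() and
	 * cpu_to_node() are simplified stand-ins, not kernel interfaces.
	 */
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t mask_t;	/* stand-in for struct cpumask */

	/* stand-in topology: CPUs 0-3 on node 0, CPUs 4-7 on node 1 */
	static mask_t cpus_of_node(int node) { return 0xFULL << (node * 4); }
	static int cpu_to_node(int cpu)       { return cpu / 4; }

	/* old scheme: cache the intersection in a temporary nodemask,
	 * then copy it into the domain span */
	static mask_t span_via_cached_nodemask(int cpu, mask_t cpu_map)
	{
		mask_t nodemask = cpus_of_node(cpu_to_node(cpu)) & cpu_map;
		return nodemask;	/* "cpumask_copy" step */
	}

	/* new scheme: compute the intersection where it is consumed */
	static mask_t span_direct(int cpu, mask_t cpu_map)
	{
		return cpus_of_node(cpu_to_node(cpu)) & cpu_map;
	}

	int main(void)
	{
		mask_t cpu_map = 0x3F;	/* CPUs 0-5 "online" */

		for (int cpu = 0; cpu < 8; cpu++)
			if (cpu_map & (1ULL << cpu))
				printf("cpu %d: old=0x%llx new=0x%llx\n", cpu,
				       (unsigned long long)span_via_cached_nodemask(cpu, cpu_map),
				       (unsigned long long)span_direct(cpu, cpu_map));
		return 0;
	}

Both helpers produce the same span for every CPU, which is why the
intermediate mask (and its allocation/free paths) can be dropped.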
