Message-Id: <1452189456-8486-5-git-send-email-mgorman@techsingularity.net>
Date: Thu, 7 Jan 2016 17:57:35 +0000
From: Mel Gorman <mgorman@...hsingularity.net>
To: Linux-Stable <stable@...r.kernel.org>
Cc: Srikar Dronamraju <srikar@...ux.vnet.ibm.com>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
LKML <linux-kernel@...r.kernel.org>,
Mel Gorman <mgorman@...hsingularity.net>
Subject: [PATCH 4/5] sched/numa: Convert sched_numa_balancing to a static_branch
From: Srikar Dronamraju <srikar@...ux.vnet.ibm.com>
commit 2a595721a1fa6b684c1c818f379bef834ac3d65e upstream.
The variable sched_numa_balancing toggles the numa_balancing feature, so
move it from a simple read-mostly variable to the more apt static_branch.
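For readers unfamiliar with the static-branch API, the pattern this patch
adopts looks roughly like the following (an illustrative sketch drawn from
the identifiers in the diff below, not additional patch content):

    /* declare a jump-label key that starts out disabled */
    DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);

    /* fast path: compiles to straight-line code (a no-op) while the
     * key is disabled; enabling the key patches the branch in at
     * runtime, so the disabled case costs no conditional test */
    if (static_branch_unlikely(&sched_numa_balancing))
        task_tick_numa(rq, curr);

    /* slow path: enable/disable rewrite all branch sites */
    static_branch_enable(&sched_numa_balancing);
    static_branch_disable(&sched_numa_balancing);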
Suggested-by: Peter Zijlstra <peterz@...radead.org>
Signed-off-by: Srikar Dronamraju <srikar@...ux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Mel Gorman <mgorman@...e.de>
Cc: Mike Galbraith <efault@....de>
Cc: Rik van Riel <riel@...hat.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Link: http://lkml.kernel.org/r/1439310261-16124-1-git-send-email-srikar@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@...nel.org>
Signed-off-by: Mel Gorman <mgorman@...hsingularity.net>
---
kernel/sched/core.c | 12 +++++-------
kernel/sched/fair.c | 6 +++---
kernel/sched/sched.h | 6 +-----
3 files changed, 9 insertions(+), 15 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6cd6ce1fe161..5998e12e14b4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2114,18 +2114,16 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
#endif /* CONFIG_NUMA_BALANCING */
}
+DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);
+
#ifdef CONFIG_NUMA_BALANCING
-__read_mostly bool sched_numa_balancing;
void set_numabalancing_state(bool enabled)
{
- sched_numa_balancing = enabled;
-#ifdef CONFIG_SCHED_DEBUG
if (enabled)
- sched_feat_set("NUMA");
+ static_branch_enable(&sched_numa_balancing);
else
- sched_feat_set("NO_NUMA");
-#endif /* CONFIG_SCHED_DEBUG */
+ static_branch_disable(&sched_numa_balancing);
}
#ifdef CONFIG_PROC_SYSCTL
@@ -2134,7 +2132,7 @@ int sysctl_numa_balancing(struct ctl_table *table, int write,
{
struct ctl_table t;
int err;
- int state = sched_numa_balancing;
+ int state = static_branch_likely(&sched_numa_balancing);
if (write && !capable(CAP_SYS_ADMIN))
return -EPERM;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0d5987e8e329..7975932034f9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2069,7 +2069,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
int local = !!(flags & TNF_FAULT_LOCAL);
int priv;
- if (!sched_numa_balancing)
+ if (!static_branch_likely(&sched_numa_balancing))
return;
/* for example, ksmd faulting in a user's mm */
@@ -5525,7 +5525,7 @@ static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
unsigned long src_faults, dst_faults;
int src_nid, dst_nid;
- if (!sched_numa_balancing)
+ if (!static_branch_likely(&sched_numa_balancing))
return -1;
if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
@@ -7811,7 +7811,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
entity_tick(cfs_rq, se, queued);
}
- if (sched_numa_balancing)
+ if (static_branch_unlikely(&sched_numa_balancing))
task_tick_numa(rq, curr);
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index fb75db386adc..01875872cc94 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1003,11 +1003,7 @@ extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
-#ifdef CONFIG_NUMA_BALANCING
-extern bool sched_numa_balancing;
-#else
-#define sched_numa_balancing (0)
-#endif /* CONFIG_NUMA_BALANCING */
+extern struct static_key_false sched_numa_balancing;
static inline u64 global_rt_period(void)
{
--
2.6.4