Message-Id: <20191011140908.5161-1-longman@redhat.com>
Date: Fri, 11 Oct 2019 10:09:08 -0400
From: Waiman Long <longman@...hat.com>
To: linux-kernel@...r.kernel.org, linux-rt-users@...r.kernel.org
Cc: Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Thomas Gleixner <tglx@...utronix.de>,
Steven Rostedt <rostedt@...dmis.org>,
Juri Lelli <jlelli@...hat.com>,
Waiman Long <longman@...hat.com>
Subject: [PATCH RT] kernel/sched: Don't recompute cpumask weight in migrate_enable_update_cpus_allowed()

At each invocation of rt_spin_unlock(), cpumask_weight() is called
via migrate_enable_update_cpus_allowed() to recompute the weight of
cpus_mask, which doesn't change that often.
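
For scale, cpumask_weight() boils down to __bitmap_weight(), a
population count over every word of the bitmap, so the cost grows with
NR_CPUS and is paid again on every unlock. The following standalone
sketch approximates that loop (the NR_CPUS value and the mask_weight()
name are illustrative, and __builtin_popcountl() stands in for the
kernel's hweight_long()):

  #include <stdio.h>

  #define NR_CPUS       256
  #define BITS_PER_LONG (8 * sizeof(unsigned long))

  /*
   * Simplified stand-in for cpumask_weight()/__bitmap_weight():
   * count the set bits in every word of the mask.  Assumes the bit
   * count is a multiple of BITS_PER_LONG, so no partial-word tail.
   */
  static unsigned int mask_weight(const unsigned long *mask, unsigned int bits)
  {
          unsigned int k, w = 0;

          for (k = 0; k < bits / BITS_PER_LONG; k++)
                  w += __builtin_popcountl(mask[k]);
          return w;
  }

  int main(void)
  {
          unsigned long mask[NR_CPUS / BITS_PER_LONG] = { 0xffUL };

          printf("weight = %u\n", mask_weight(mask, NR_CPUS));
          return 0;
  }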

The following is a sample output of perf-record running the testpmd
microbenchmark on an RT kernel:

  34.77%  1.65%  testpmd  [kernel.kallsyms]  [k] rt_spin_unlock
  34.32%  2.52%  testpmd  [kernel.kallsyms]  [k] migrate_enable
  21.76% 21.76%  testpmd  [kernel.kallsyms]  [k] __bitmap_weight

By adding an extra variable to keep track of the weight of cpus_mask,
we can eliminate the frequent calls to cpumask_weight() and replace
them with a simple assignment.
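
In other words, the popcount moves to the single writer of cpus_mask
and every reader becomes a plain load. A minimal standalone sketch of
that caching pattern (struct task, task_set_mask() and the field names
are made up for illustration, not the kernel's actual code):

  #include <stdio.h>

  #define NR_CPUS       256
  #define BITS_PER_LONG (8 * sizeof(unsigned long))
  #define MASK_WORDS    (NR_CPUS / BITS_PER_LONG)

  /* Hypothetical stand-in for the relevant task_struct fields. */
  struct task {
          unsigned long mask[MASK_WORDS];
          unsigned int  nr_mask;        /* cached weight of mask */
  };

  /* Sole writer: recompute the weight once, where the mask changes. */
  static void task_set_mask(struct task *t, const unsigned long *new_mask)
  {
          unsigned int k, w = 0;

          for (k = 0; k < MASK_WORDS; k++) {
                  t->mask[k] = new_mask[k];
                  w += __builtin_popcountl(new_mask[k]);
          }
          t->nr_mask = w;
  }

  int main(void)
  {
          struct task t;
          unsigned long m[MASK_WORDS] = { 0xfUL };

          task_set_mask(&t, m);
          /* Hot path: a plain load instead of an O(NR_CPUS) popcount. */
          printf("cached weight = %u\n", t.nr_mask);
          return 0;
  }
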
Signed-off-by: Waiman Long <longman@...hat.com>
---
 include/linux/sched.h | 3 ++-
 init/init_task.c      | 1 +
 kernel/fork.c         | 4 +++-
 kernel/sched/core.c   | 8 +++++---
 4 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7e892e727f12..c65c75b82056 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -656,7 +656,8 @@ struct task_struct {
 #endif
 
        unsigned int                    policy;
-       int                             nr_cpus_allowed;
+       unsigned int                    nr_cpus_allowed; /* # in cpus_ptr  */
+       unsigned int                    nr_cpus_mask;    /* # in cpus_mask */
        const cpumask_t                 *cpus_ptr;
        cpumask_t                       cpus_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
diff --git a/init/init_task.c b/init/init_task.c
index e402413dc47d..36bc82439ff1 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -81,6 +81,7 @@ struct task_struct init_task
        .cpus_ptr       = &init_task.cpus_mask,
        .cpus_mask      = CPU_MASK_ALL,
        .nr_cpus_allowed= NR_CPUS,
+       .nr_cpus_mask   = NR_CPUS,
        .mm             = NULL,
        .active_mm      = &init_mm,
        .restart_block  = {
diff --git a/kernel/fork.c b/kernel/fork.c
index 3c7738d87ddb..e00b92a18444 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -935,8 +935,10 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 #ifdef CONFIG_STACKPROTECTOR
        tsk->stack_canary = get_random_canary();
 #endif
-       if (orig->cpus_ptr == &orig->cpus_mask)
+       if (orig->cpus_ptr == &orig->cpus_mask) {
                tsk->cpus_ptr = &tsk->cpus_mask;
+               tsk->nr_cpus_allowed = tsk->nr_cpus_mask;
+       }
 
        /*
         * One for us, one for whoever does the "release_task()" (usually
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 93b4ae1ecaff..a299b7dd3de0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1126,7 +1126,9 @@ static int migration_cpu_stop(void *data)
 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
 {
        cpumask_copy(&p->cpus_mask, new_mask);
-       p->nr_cpus_allowed = cpumask_weight(new_mask);
+       p->nr_cpus_mask = cpumask_weight(new_mask);
+       if (p->cpus_ptr == &p->cpus_mask)
+               p->nr_cpus_allowed = p->nr_cpus_mask;
 }
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
@@ -1173,7 +1175,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
        if (__migrate_disabled(p)) {
                lockdep_assert_held(&p->pi_lock);
 
-               cpumask_copy(&p->cpus_mask, new_mask);
+               set_cpus_allowed_common(p, new_mask);
                p->migrate_disable_update = 1;
                return;
        }
@@ -7335,7 +7337,7 @@ migrate_enable_update_cpus_allowed(struct task_struct *p)
 
        rq = task_rq_lock(p, &rf);
        p->cpus_ptr = &p->cpus_mask;
-       p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask);
+       p->nr_cpus_allowed = p->nr_cpus_mask;
        update_nr_migratory(p, 1);
        task_rq_unlock(rq, p, &rf);
 }
--
2.18.1