Message-Id: <1479511084-59727-2-git-send-email-fenghua.yu@intel.com>
Date: Fri, 18 Nov 2016 15:18:04 -0800
From: "Fenghua Yu" <fenghua.yu@...el.com>
To: "Thomas Gleixner" <tglx@...utronix.de>
Cc: "H. Peter Anvin" <h.peter.anvin@...el.com>,
"Ingo Molnar" <mingo@...e.hu>, "Tony Luck" <tony.luck@...el.com>,
"Ravi V Shankar" <ravi.v.shankar@...el.com>,
"Sai Prakhya" <sai.praneeth.prakhya@...el.com>,
"Vikas Shivappa" <vikas.shivappa@...ux.intel.com>,
"linux-kernel" <linux-kernel@...r.kernel.org>,
"x86" <x86@...nel.org>, "Fenghua Yu" <fenghua.yu@...el.com>
Subject: [PATCH 2/2] x86/intel_rdt: Update task closid immediately on CPU in rmdir and unmount
From: Fenghua Yu <fenghua.yu@...el.com>
When a sub-directory/rdtgroup is removed by rmdir or umount, the closid of
every task in that sub-directory is set to the default rdtgroup's closid,
which is 0. If such a task is currently running on a CPU, the PQR_ASSOC MSR
is only updated when the task goes through a context switch. Until that
context switch happens, the task keeps running with the wrong closid.
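
For context, the only place the MSR gets written is the scheduling hook,
which looks roughly like the sketch below (illustrative only; the per-cpu
cache name pqr_closid is simplified here, the real code keys the write off
its cached PQR state):

/* pqr_closid stands in for the cached PQR_ASSOC state (simplified). */
static DEFINE_PER_CPU(int, pqr_closid);

static void intel_rdt_sched_in(void)
{
	/* A closid assigned to the task takes precedence over the CPU's. */
	int closid = current->closid;

	if (!closid)
		closid = this_cpu_read(cpu_closid);

	/* Only write the MSR when the effective closid actually changed. */
	if (closid != this_cpu_read(pqr_closid)) {
		this_cpu_write(pqr_closid, closid);
		/* RMID sits in the low half; left as 0 in this sketch. */
		wrmsr(MSR_IA32_PQR_ASSOC, 0, closid);
	}
}

Because this hook only runs from __switch_to(), nothing refreshes PQR_ASSOC
for a task whose closid is changed while it keeps running.
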
Make the change effective immediately by issuing an smp function call on
all online CPUs which invokes intel_rdt_sched_in() to update the
PQR_ASSOC MSR.
rdt_update_closid() (renamed from rdt_update_percpu_closid()) calls
intel_rdt_sched_in() to update the closid in the PQR_ASSOC MSR on a CPU.
Task closids and per-cpu closids must be set up before rdt_update_closid()
is called. Handling the PQR_ASSOC MSR update for both task closids and
per-cpu closids in rdt_update_closid() avoids redundant smp function calls.
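
Put together, every call site follows the same pattern. The helper below is
hypothetical and for illustration only; the real call sites are
rdtgroup_cpus_write(), rmdir_all_sub() and rdtgroup_rmdir() in the diff, and
rdtgroup_rmdir() only widens the mask to cpu_online_mask when tasks were
actually moved:

static void example_return_group_to_default(struct rdtgroup *rdtgrp)
{
	int cpu;

	/* 1) Point the per cpu closids of the group's CPUs at the default group. */
	for_each_cpu(cpu, &rdtgrp->cpu_mask)
		per_cpu(cpu_closid, cpu) = rdtgroup_default.closid;

	/* 2) Point the closid of every task in the group at the default group. */
	rdt_move_task_closid(rdtgrp, &rdtgroup_default);

	/* 3) A single smp function call round makes both visible in PQR_ASSOC. */
	get_online_cpus();
	rdt_update_closid(cpu_online_mask);
	put_online_cpus();
}
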
Signed-off-by: Fenghua Yu <fenghua.yu@...el.com>
---
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 88 +++++++++++++++++++++++---------
1 file changed, 63 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index eccea8a..ff0ee57 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -194,12 +194,14 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
/*
* This is safe against intel_rdt_sched_in() called from __switch_to()
* because __switch_to() is executed with interrupts disabled. A local call
- * from rdt_update_percpu_closid() is proteced against __switch_to() because
+ * from rdt_update_closid() is protected against __switch_to() because
* preemption is disabled.
+ *
+ * Task closid and percpu closid should be set up before calling
+ * this function.
*/
-static void rdt_update_cpu_closid(void *v)
+static void rdt_update_cpu_closid(void *unused)
{
- this_cpu_write(cpu_closid, *(int *)v);
/*
* We cannot unconditionally write the MSR because the current
* executing task might have its own closid selected. Just reuse
@@ -208,14 +210,14 @@ static void rdt_update_cpu_closid(void *v)
intel_rdt_sched_in();
}
-/* Update the per cpu closid and eventually the PGR_ASSOC MSR */
-static void rdt_update_percpu_closid(const struct cpumask *cpu_mask, int closid)
+/* Update closid in the PQR_ASSOC MSR */
+static void rdt_update_closid(const struct cpumask *cpu_mask)
{
int cpu = get_cpu();
if (cpumask_test_cpu(cpu, cpu_mask))
- rdt_update_cpu_closid(&closid);
- smp_call_function_many(cpu_mask, rdt_update_cpu_closid, &closid, 1);
+ rdt_update_cpu_closid(NULL);
+ smp_call_function_many(cpu_mask, rdt_update_cpu_closid, NULL, 1);
put_cpu();
}
@@ -224,7 +226,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
{
cpumask_var_t tmpmask, newmask;
struct rdtgroup *rdtgrp, *r;
- int ret;
+ int cpu, ret;
if (!buf)
return -EINVAL;
@@ -264,7 +266,9 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
/* Give any dropped cpus to rdtgroup_default */
cpumask_or(&rdtgroup_default.cpu_mask,
&rdtgroup_default.cpu_mask, tmpmask);
- rdt_update_percpu_closid(tmpmask, rdtgroup_default.closid);
+ for_each_cpu(cpu, tmpmask)
+ per_cpu(cpu_closid, cpu) = rdtgroup_default.closid;
+ rdt_update_closid(tmpmask);
}
/*
@@ -278,7 +282,9 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
continue;
cpumask_andnot(&r->cpu_mask, &r->cpu_mask, tmpmask);
}
- rdt_update_percpu_closid(tmpmask, rdtgrp->closid);
+ for_each_cpu(cpu, tmpmask)
+ per_cpu(cpu_closid, cpu) = rdtgrp->closid;
+ rdt_update_closid(tmpmask);
}
/* Done pushing/pulling - update this group with new mask */
@@ -806,19 +812,33 @@ static int reset_all_cbms(struct rdt_resource *r)
return 0;
}
+static int rdt_move_task_closid(struct rdtgroup *from, struct rdtgroup *to)
+{
+ struct task_struct *p, *t;
+ bool moved_task = false;
+
+ read_lock(&tasklist_lock);
+ for_each_process_thread(p, t) {
+ if (!from || t->closid == from->closid) {
+ t->closid = to->closid;
+ moved_task = true;
+ }
+ }
+ read_unlock(&tasklist_lock);
+
+ return moved_task;
+}
+
/*
* Forcibly remove all of subdirectories under root.
*/
static void rmdir_all_sub(void)
{
struct rdtgroup *rdtgrp, *tmp;
- struct task_struct *p, *t;
+ int cpu;
/* move all tasks to default resource group */
- read_lock(&tasklist_lock);
- for_each_process_thread(p, t)
- t->closid = 0;
- read_unlock(&tasklist_lock);
+ rdt_move_task_closid(NULL, &rdtgroup_default);
list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
/* Remove each rdtgroup other than root */
@@ -833,13 +853,18 @@ static void rmdir_all_sub(void)
cpumask_or(&rdtgroup_default.cpu_mask,
&rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
- rdt_update_percpu_closid(&rdtgrp->cpu_mask,
- rdtgroup_default.closid);
+ for_each_cpu(cpu, &rdtgrp->cpu_mask)
+ per_cpu(cpu_closid, cpu) = rdtgroup_default.closid;
kernfs_remove(rdtgrp->kn);
list_del(&rdtgrp->rdtgroup_list);
kfree(rdtgrp);
}
+ /* Simply notify every online CPU to update PQR_ASSOC MSR */
+ get_online_cpus();
+ rdt_update_closid(cpu_online_mask);
+ put_online_cpus();
+
kernfs_remove(kn_info);
}
@@ -944,8 +969,9 @@ static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
static int rdtgroup_rmdir(struct kernfs_node *kn)
{
- struct task_struct *p, *t;
struct rdtgroup *rdtgrp;
+ bool moved_task = false;
+ int cpu;
rdtgrp = rdtgroup_kn_lock_live(kn);
if (!rdtgrp) {
@@ -954,17 +980,29 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
}
/* Give any tasks back to the default group */
- read_lock(&tasklist_lock);
- for_each_process_thread(p, t) {
- if (t->closid == rdtgrp->closid)
- t->closid = 0;
- }
- read_unlock(&tasklist_lock);
+ moved_task = rdt_move_task_closid(rdtgrp, &rdtgroup_default);
/* Give any CPUs back to the default group */
cpumask_or(&rdtgroup_default.cpu_mask,
&rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
- rdt_update_percpu_closid(&rdtgrp->cpu_mask, rdtgroup_default.closid);
+
+ /* Update the per cpu closid of the CPUs owned by rdtgrp */
+ for_each_cpu(cpu, &rdtgrp->cpu_mask)
+ per_cpu(cpu_closid, cpu) = rdtgroup_default.closid;
+
+ /*
+ * If we moved any tasks out of this group, then force an
+ * update on *all* CPUs so this will take effect right away.
+ * Otherwise if we reallocated some CPUs from this group
+ * we can just update the affected CPUs.
+ */
+ if (moved_task) {
+ get_online_cpus();
+ rdt_update_closid(cpu_online_mask);
+ put_online_cpus();
+ } else if (!cpumask_empty(&rdtgrp->cpu_mask)) {
+ rdt_update_closid(&rdtgrp->cpu_mask);
+ }
rdtgrp->flags = RDT_DELETED;
closid_free(rdtgrp->closid);
--
2.5.0