Message-Id: <1498503368-20173-19-git-send-email-vikas.shivappa@linux.intel.com>
Date: Mon, 26 Jun 2017 11:56:05 -0700
From: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
To: x86@...nel.org, linux-kernel@...r.kernel.org, tglx@...utronix.de
Cc: hpa@...or.com, peterz@...radead.org, ravi.v.shankar@...el.com,
vikas.shivappa@...el.com, tony.luck@...el.com,
fenghua.yu@...el.com, andi.kleen@...el.com
Subject: [PATCH 18/21] x86/intel_rdt/cqm: Add hotcpu support
Resource groups have a per-domain directory under "mon_data". Add or
remove these directories when domains come online or go offline. Also
clear the per-CPU RMID and the cached PQR_ASSOC state when CPUs are
onlined or offlined.
Signed-off-by: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
---
arch/x86/kernel/cpu/intel_rdt.c | 28 +++++++++++++++-----
arch/x86/kernel/cpu/intel_rdt.h | 9 +++++++
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 44 ++++++++++++++++++++++++++++++++
3 files changed, 75 insertions(+), 6 deletions(-)
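Note (editor context, not part of the commit): the intel_rdt_online_cpu()
and intel_rdt_offline_cpu() callbacks modified below are the CPU hotplug
hooks for RDT. A minimal sketch of how such callbacks are registered,
assuming the existing cpuhp_setup_state() call in intel_rdt_late_init()
(the state name string and the wrapper function name here are
illustrative assumptions):

#include <linux/cpuhotplug.h>

/*
 * Sketch only: mirrors the existing dynamic-state registration so the
 * online/offline callbacks run whenever a CPU comes up or goes down.
 * intel_rdt_online_cpu()/intel_rdt_offline_cpu() are the callbacks
 * touched by this patch; the state name is an assumption.
 */
static int __init rdt_register_hotcpu_sketch(void)
{
	int state;

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/rdt/cat:online:",
				  intel_rdt_online_cpu, intel_rdt_offline_cpu);
	if (state < 0)
		return state;

	return 0;
}

With that in place, the first CPU of a domain coming online creates a
"mon_%s_%02d" directory (e.g. mon_L3_01 for L3 domain 1) under mon_data
of every ctrl_mon and monitor group, and the last CPU of a domain going
offline removes it again, as implemented in the diff below.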
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 63bfb47c..8c7643d 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -438,6 +438,13 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
cpumask_set_cpu(cpu, &d->cpu_mask);
list_add_tail(&d->list, add_pos);
+
+ /*
+ * If resctrl is mounted, add
+ * per domain monitor data directories.
+ */
+ if (static_branch_unlikely(&rdt_mon_enable_key))
+ mkdir_mondata_subdir_allrdtgrp(r, d->id);
}
static void domain_remove_cpu(int cpu, struct rdt_resource *r)
@@ -453,19 +460,28 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
cpumask_clear_cpu(cpu, &d->cpu_mask);
if (cpumask_empty(&d->cpu_mask)) {
+ /*
+ * If resctrl is mounted, remove all the
+ * per domain monitor data directories.
+ */
+ if (static_branch_unlikely(&rdt_mon_enable_key))
+ rmdir_mondata_subdir_allrdtgrp(r, d->id);
kfree(d->ctrl_val);
list_del(&d->list);
kfree(d);
}
}
-static void clear_closid(int cpu)
+static void clear_closid_rmid(int cpu)
{
struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
per_cpu(cpu_closid, cpu) = 0;
+ per_cpu(cpu_rmid, cpu) = 0;
+
state->closid = 0;
- wrmsr(IA32_PQR_ASSOC, state->rmid, 0);
+ state->rmid = 0;
+ wrmsr(IA32_PQR_ASSOC, 0, 0);
}
static int intel_rdt_online_cpu(unsigned int cpu)
@@ -473,11 +489,11 @@ static int intel_rdt_online_cpu(unsigned int cpu)
struct rdt_resource *r;
mutex_lock(&rdtgroup_mutex);
- for_each_alloc_capable_rdt_resource(r)
+ for_each_capable_rdt_resource(r)
domain_add_cpu(cpu, r);
/* The cpu is set in default rdtgroup after online. */
cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
- clear_closid(cpu);
+ clear_closid_rmid(cpu);
mutex_unlock(&rdtgroup_mutex);
return 0;
@@ -500,7 +516,7 @@ static int intel_rdt_offline_cpu(unsigned int cpu)
struct rdt_resource *r;
mutex_lock(&rdtgroup_mutex);
- for_each_alloc_capable_rdt_resource(r)
+ for_each_capable_rdt_resource(r)
domain_remove_cpu(cpu, r);
list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
@@ -508,7 +524,7 @@ static int intel_rdt_offline_cpu(unsigned int cpu)
break;
}
}
- clear_closid(cpu);
+ clear_closid_rmid(cpu);
mutex_unlock(&rdtgroup_mutex);
return 0;
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index ea7a86f..b771dae 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -290,6 +290,11 @@ enum {
RDT_NUM_RESOURCES,
};
+#define for_each_capable_rdt_resource(r) \
+ for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+ r++) \
+ if (r->alloc_capable || r->mon_capable)
+
#define for_each_alloc_capable_rdt_resource(r) \
for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
r++) \
@@ -350,5 +355,9 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
void rdt_get_mon_l3_config(struct rdt_resource *r);
void mon_event_count(void *info);
int rdtgroup_mondata_show(struct seq_file *m, void *arg);
+void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
+ unsigned int dom_id);
+void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
+ unsigned int dom_id);
#endif /* _ASM_X86_INTEL_RDT_H */
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 2384c07..8e1581a 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -1348,6 +1348,27 @@ static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
return ret;
}
+/*
+ * Remove all subdirectories of mon_data of ctrl_mon groups
+ * and monitor groups with given domain id.
+ */
+void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id)
+{
+ struct rdtgroup *pr, *cr;
+ char name[32];
+
+ if (!r->mon_enabled)
+ return;
+
+ list_for_each_entry(pr, &rdt_all_groups, rdtgroup_list) {
+ sprintf(name, "mon_%s_%02d", r->name, dom_id);
+ kernfs_remove_by_name(pr->mon_data_kn, name);
+
+ list_for_each_entry(cr, &pr->crdtgrp_list, crdtgrp_list)
+ kernfs_remove_by_name(cr->mon_data_kn, name);
+ }
+}
+
static int get_rdt_resourceid(struct rdt_resource *r)
{
if (r > (rdt_resources_all + RDT_NUM_RESOURCES - 1) ||
@@ -1407,6 +1428,29 @@ static int mkdir_mondata_subdir(struct kernfs_node *parent_kn, int domid,
return ret;
}
+/*
+ * Add all subdirectories of mon_data for "ctrl_mon" groups
+ * and "monitor" groups with given domain id.
+ */
+void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int domid)
+{
+ struct kernfs_node *parent_kn;
+ struct rdtgroup *pr, *cr;
+
+ if (!r->mon_enabled)
+ return;
+
+ list_for_each_entry(pr, &rdt_all_groups, rdtgroup_list) {
+ parent_kn = pr->mon_data_kn;
+ mkdir_mondata_subdir(parent_kn, domid, r, pr);
+
+ list_for_each_entry(cr, &pr->crdtgrp_list, crdtgrp_list) {
+ parent_kn = cr->mon_data_kn;
+ mkdir_mondata_subdir(parent_kn, domid, r, cr);
+ }
+ }
+}
+
static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
struct rdt_resource *r,
struct rdtgroup *pr)
--
1.9.1