Message-Id: <1491255857-17213-2-git-send-email-vikas.shivappa@linux.intel.com>
Date: Mon, 3 Apr 2017 14:44:15 -0700
From: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
To: vikas.shivappa@...el.com, x86@...nel.org,
linux-kernel@...r.kernel.org
Cc: hpa@...or.com, tglx@...utronix.de, mingo@...nel.org,
peterz@...radead.org, ravi.v.shankar@...el.com,
tony.luck@...el.com, fenghua.yu@...el.com, h.peter.anvin@...el.com
Subject: [PATCH 1/3] x86/intel_rdt: Fix issue when mkdir uses a freed CLOSid
Each resctrl directory has one CLOSid allocated, which is mapped to a
control register/QOS_MSR. During an rmdir this CLOSid is freed and can
be reused later when a new directory is created. Currently we do not
reset the QOS_MSR to a default value when the CLOSid is freed, so when
the next mkdir uses a freed CLOSid, the new directory is associated
with a stale QOS_MSR.

Fix this issue by writing a default value to the QOS_MSR when the
associated CLOSid is freed. The default value is all 1s for the CBM,
which means no control is enforced when a subsequent mkdir reuses this
CLOSid.
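
For illustration only (not part of the patch), a minimal user-space
sketch of the idea: a freed CLOSid's control value is put back to the
all-1s CBM so a later mkdir that reuses it starts unrestricted. The
names below (cbm[], MAX_CBM, mkdir_use_closid(), rmdir_free_closid())
are simplified stand-ins, not the actual resctrl internals beyond what
the diff shows:

#include <stdint.h>
#include <stdio.h>

#define NUM_CLOSID	16
#define CBM_LEN		20			/* bits in the capacity bitmask */
#define MAX_CBM		((1u << CBM_LEN) - 1)	/* all 1s: no control enforced */

static uint32_t cbm[NUM_CLOSID];		/* stand-in for d->cbm[closid] */

/* mkdir path: the new directory programs a specific mask for its CLOSid */
static void mkdir_use_closid(int closid, uint32_t mask)
{
	cbm[closid] = mask;			/* models the QOS_MSR write */
}

/* rmdir path: free the CLOSid and put its control back to the default */
static void rmdir_free_closid(int closid)
{
	cbm[closid] = MAX_CBM;			/* the fix: reset to all 1s */
}

int main(void)
{
	for (int i = 0; i < NUM_CLOSID; i++)
		cbm[i] = MAX_CBM;

	mkdir_use_closid(3, 0x0000f);		/* restrict CLOSid 3 */
	rmdir_free_closid(3);			/* reset on rmdir */

	/* a later mkdir reusing CLOSid 3 now sees the default, not a stale mask */
	printf("closid 3 cbm = 0x%05x\n", (unsigned int)cbm[3]);
	return 0;
}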
Signed-off-by: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
---
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 18 +++++++++++++-----
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 9ac2a5c..77e88c0 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -780,7 +780,7 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
return dentry;
}
-static int reset_all_cbms(struct rdt_resource *r)
+static int reset_all_ctrls(struct rdt_resource *r, u32 closid, u32 count)
{
struct msr_param msr_param;
cpumask_var_t cpu_mask;
@@ -791,8 +791,8 @@ static int reset_all_cbms(struct rdt_resource *r)
return -ENOMEM;
msr_param.res = r;
- msr_param.low = 0;
- msr_param.high = r->num_closid;
+ msr_param.low = closid;
+ msr_param.high = closid + count;
/*
* Disable resource control for this resource by setting all
@@ -802,7 +802,7 @@ static int reset_all_cbms(struct rdt_resource *r)
list_for_each_entry(d, &r->domains, list) {
cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
- for (i = 0; i < r->num_closid; i++)
+ for (i = closid; i < closid + count; i++)
d->cbm[i] = r->max_cbm;
}
cpu = get_cpu();
@@ -896,7 +896,7 @@ static void rdt_kill_sb(struct super_block *sb)
/*Put everything back to default values. */
for_each_enabled_rdt_resource(r)
- reset_all_cbms(r);
+ reset_all_ctrls(r, 0, r->num_closid);
cdp_disable();
rmdir_all_sub();
static_branch_disable(&rdt_enable_key);
@@ -991,6 +991,7 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
{
int ret, cpu, closid = rdtgroup_default.closid;
struct rdtgroup *rdtgrp;
+ struct rdt_resource *r;
cpumask_var_t tmpmask;
if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
@@ -1019,6 +1020,13 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
rdt_update_closid(tmpmask, NULL);
+ /*
+ * Put domain control values back to default for the
+ * rdtgrp that's being removed.
+ */
+ for_each_enabled_rdt_resource(r)
+ reset_all_ctrls(r, rdtgrp->closid, 1);
+
rdtgrp->flags = RDT_DELETED;
closid_free(rdtgrp->closid);
list_del(&rdtgrp->rdtgroup_list);
--
1.9.1