Message-Id: <1424819804-4082-4-git-send-email-vikas.shivappa@linux.intel.com>
Date:	Tue, 24 Feb 2015 15:16:40 -0800
From:	Vikas Shivappa <vikas.shivappa@...ux.intel.com>
To:	linux-kernel@...r.kernel.org
Cc:	vikas.shivappa@...el.com, vikas.shivappa@...ux.intel.com,
	matt.fleming@...el.com, hpa@...or.com, tglx@...utronix.de,
	mingo@...nel.org, tj@...nel.org, peterz@...radead.org,
	will.auld@...el.com, dave.hansen@...el.com, andi.kleen@...el.com,
	tony.luck@...el.com, kanaka.d.juvva@...el.com
Subject: [PATCH 3/7] x86/intel_rdt: Support cache bit mask for Intel CAT

Add support for cache bit mask manipulation. This change adds a file to
the RDT cgroup which represents the CBM (cache bit mask) for the cgroup.

The RDT cgroup follows the cgroup hierarchy; mkdir and adding tasks to
the cgroup never fail. When a child cgroup is created, it inherits the
CLOSid and the CBM from its parent. When a user changes the default
CBM for a cgroup, a new CLOSid may be allocated if that CBM was not
in use before. If the new CBM is already in use, the reference count
for that CLOSid<->CBM pair is incremented instead. Writing 'cbm' may
fail with -ENOSPC once the kernel runs out of CLOSids.
Users can create as many cgroups as they want, but the number of
distinct CBMs in use at the same time is limited by the maximum number
of CLOSids (multiple cgroups can share the same CBM). The kernel
maintains a CLOSid<->CBM mapping which counts the cgroups using each
CLOSid; for example, two cgroups that are both assigned the CBM 0xff
share one CLOSid with a count of two.

The tasks in a CAT cgroup are allowed to fill only the portion of the
LLC represented by the cgroup's 'cbm' file.
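
For example (a hypothetical session; the mount point and group name
are illustrative, and 0xf here selects the low four ways of the cache):

	cd /sys/fs/cgroup/rdt
	mkdir group1
	echo 0xf > group1/cbm
	echo $$ > group1/tasks	# move the current shell into group1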

Reusing CLOSids for cgroups with the same bitmask also has the
following advantages:
- It helps to use the scarce CLOSids optimally.
- It also implies that during a context switch, the PQR MSR is written
only when a task with a different bitmask is scheduled in (see the
sketch below).
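
A minimal sketch of the context-switch path this enables (the actual
hook lands in a later patch of this series; the prev_clos/next_clos
variables are illustrative):

	/*
	 * Reprogram IA32_PQR_ASSOC only when the incoming task's
	 * CLOSid differs from the outgoing one; tasks sharing a
	 * bitmask share a CLOSid, so no MSR write is needed for them.
	 */
	if (prev_clos != next_clos)
		wrmsr(MSR_IA32_PQR_ASSOC, 0, next_clos);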

Signed-off-by: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
---
 arch/x86/include/asm/intel_rdt.h |   3 +
 arch/x86/kernel/cpu/intel_rdt.c  | 179 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 182 insertions(+)

diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
index ecd9664..a414771 100644
--- a/arch/x86/include/asm/intel_rdt.h
+++ b/arch/x86/include/asm/intel_rdt.h
@@ -4,6 +4,9 @@
 #ifdef CONFIG_CGROUP_RDT
 
 #include <linux/cgroup.h>
+#define MAX_CBM_LENGTH			32
+#define IA32_L3_CBM_BASE		0xc90
 +#define CBM_FROM_INDEX(x)		(IA32_L3_CBM_BASE + (x))
 
 struct rdt_subsys_info {
 	/* Clos Bitmap to keep track of available CLOSids.*/
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 6cf1a16..dd090a7 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -33,6 +33,9 @@ static struct rdt_subsys_info rdtss_info;
 static DEFINE_MUTEX(rdt_group_mutex);
 struct intel_rdt rdt_root_group;
 
+#define rdt_for_each_child(pos_css, parent_ir)		\
+	css_for_each_child((pos_css), &(parent_ir)->css)
+
 static inline bool cat_supported(struct cpuinfo_x86 *c)
 {
 	if (cpu_has(c, X86_FEATURE_CAT_L3))
@@ -84,6 +87,30 @@ static int __init rdt_late_init(void)
 late_initcall(rdt_late_init);
 
 /*
+ * Allocates a new closid from unused closids.
+ * Called with the rdt_group_mutex held.
+ */
+
+static int rdt_alloc_closid(struct intel_rdt *ir)
+{
+	unsigned int id;
+	unsigned int maxid;
+
+	lockdep_assert_held(&rdt_group_mutex);
+
+	maxid = boot_cpu_data.x86_cat_closs;
+	id = find_next_zero_bit(rdtss_info.closmap, maxid, 0);
+	if (id == maxid)
+		return -ENOSPC;
+
+	set_bit(id, rdtss_info.closmap);
+	ccmap[id].cgrp_count++;
+	ir->clos = id;
+
+	return 0;
+}
+
+/*
 * Called with the rdt_group_mutex held.
 */
 static int rdt_free_closid(struct intel_rdt *ir)
@@ -135,8 +162,160 @@ static void rdt_css_free(struct cgroup_subsys_state *css)
 	mutex_unlock(&rdt_group_mutex);
 }
 
+/*
 + * Tests whether at least two bits are set and all set bits are contiguous.
+ */
+
+static inline bool cbm_is_contiguous(unsigned long var)
+{
+	unsigned long first_bit, zero_bit;
+	unsigned long maxcbm = MAX_CBM_LENGTH;
+
+	if (bitmap_weight(&var, maxcbm) < 2)
+		return false;
+
+	first_bit = find_next_bit(&var, maxcbm, 0);
+	zero_bit = find_next_zero_bit(&var, maxcbm, first_bit);
+
+	if (find_next_bit(&var, maxcbm, zero_bit) < maxcbm)
+		return false;
+
+	return true;
+}
+
+static int cat_cbm_read(struct seq_file *m, void *v)
+{
+	struct intel_rdt *ir = css_rdt(seq_css(m));
+
+	seq_bitmap(m, ir->cbm, MAX_CBM_LENGTH);
+	seq_putc(m, '\n');
+	return 0;
+}
+
+static int validate_cbm(struct intel_rdt *ir, unsigned long cbmvalue)
+{
+	struct intel_rdt *par, *c;
+	struct cgroup_subsys_state *css;
+
+	if (!cbm_is_contiguous(cbmvalue)) {
+		pr_info("cbm should have >= 2 bits and be contiguous\n");
+		return -EINVAL;
+	}
+
+	par = parent_rdt(ir);
+	if (!bitmap_subset(&cbmvalue, par->cbm, MAX_CBM_LENGTH))
+		return -EINVAL;
+
+	rcu_read_lock();
+	rdt_for_each_child(css, ir) {
+		c = css_rdt(css);
+		if (!bitmap_subset(c->cbm, &cbmvalue, MAX_CBM_LENGTH)) {
+			pr_info("Children's mask not a subset\n");
+			rcu_read_unlock();
+			return -EINVAL;
+		}
+	}
+
+	rcu_read_unlock();
+	return 0;
+}
+
+static bool cbm_search(unsigned long cbm, int *closid)
+{
+	int maxid = boot_cpu_data.x86_cat_closs;
+	unsigned int i;
+
+	for (i = 0; i < maxid; i++)
+		if (bitmap_equal(&cbm, &ccmap[i].cbm, MAX_CBM_LENGTH)) {
+			*closid = i;
+			return true;
+		}
+
+	return false;
+}
+
+static void cbmmap_dump(void)
+{
+	int i;
+
+	pr_debug("CBMMAP\n");
+	for (i = 0; i < boot_cpu_data.x86_cat_closs; i++)
+		pr_debug("cbm: 0x%x,cgrp_count: %u\n",
+		 (unsigned int)ccmap[i].cbm, ccmap[i].cgrp_count);
+}
+
+/*
 + * rdt_cbm_write() - Validates and writes the cache bit mask (CBM)
 + * to the corresponding IA32_L3_MASK_n MSR, and stores it in the ccmap.
+ *
 + * CLOSids are reused for cgroups which have the same bitmask.
 + * - This helps to use the scarce CLOSids optimally.
 + * - It also implies that at context switch, the
 + * PQR MSR is written only when a task with a
 + * different bitmask is scheduled in.
+ */
+
+static int cat_cbm_write(struct cgroup_subsys_state *css,
+				 struct cftype *cft, u64 cbmvalue)
+{
+	struct intel_rdt *ir = css_rdt(css);
 +	int err = 0;
+	unsigned long cbm;
+	unsigned int closid;
 +	u32 cbm_mask =
 +		(u32)((1ULL << boot_cpu_data.x86_cat_cbmlength) - 1);
+
+	if (ir == &rdt_root_group)
+		return -EPERM;
+
 +	/*
 +	 * Need global mutex as cbm write may allocate a closid.
 +	 */
+	mutex_lock(&rdt_group_mutex);
+	cbm = cbmvalue & cbm_mask;
+
+	if (bitmap_equal(&cbm, ir->cbm, MAX_CBM_LENGTH))
+		goto out;
+
+	err = validate_cbm(ir, cbm);
+	if (err)
+		goto out;
+
+	rdt_free_closid(ir);
+	if (cbm_search(cbm, &closid)) {
+		ir->clos = closid;
+		ccmap[ir->clos].cgrp_count++;
+	} else {
+		err = rdt_alloc_closid(ir);
+		if (err)
+			goto out;
+
+		wrmsrl(CBM_FROM_INDEX(ir->clos), cbm);
+	}
+
+	ccmap[ir->clos].cbm = cbm;
+	ir->cbm = &ccmap[ir->clos].cbm;
+	cbmmap_dump();
+
+out:
+
+	mutex_unlock(&rdt_group_mutex);
+	return err;
+}
+
+static struct cftype rdt_files[] = {
+	{
+		.name = "cbm",
+		.seq_show = cat_cbm_read,
+		.write_u64 = cat_cbm_write,
+		.mode = 0666,
+	},
+	{ }	/* terminate */
+};
+
 struct cgroup_subsys rdt_cgrp_subsys = {
 	.css_alloc			= rdt_css_alloc,
 	.css_free			= rdt_css_free,
+	.legacy_cftypes			= rdt_files,
 	.early_init			= 0,
 };
-- 
1.9.1
