Message-ID: <tip-d4223b381c10bff94dc7491806b6108429831fc6@git.kernel.org>
Date:	Fri, 18 Dec 2015 13:35:12 -0800
From:	tip-bot for Fenghua Yu <tipbot@...or.com>
To:	linux-tip-commits@...r.kernel.org
Cc:	hpa@...or.com, vikas.shivappa@...ux.intel.com, mingo@...nel.org,
	linux-kernel@...r.kernel.org, fenghua.yu@...el.com,
	tglx@...utronix.de
Subject: [tip:x86/cache] x86/intel_rdt: Add Class of service management

Commit-ID:  d4223b381c10bff94dc7491806b6108429831fc6
Gitweb:     http://git.kernel.org/tip/d4223b381c10bff94dc7491806b6108429831fc6
Author:     Fenghua Yu <fenghua.yu@...el.com>
AuthorDate: Thu, 17 Dec 2015 14:46:10 -0800
Committer:  H. Peter Anvin <hpa@...ux.intel.com>
CommitDate: Fri, 18 Dec 2015 13:17:56 -0800

x86/intel_rdt: Add Class of service management

From: Vikas Shivappa <vikas.shivappa@...ux.intel.com>

Add the data structures and APIs needed to manage Classes of Service
(CLOSids). A new clos_cbm table keeps a 1:1 mapping between each CLOSid
and its capacity bit mask (CBM), together with a reference count of that
CLOSid's users. Since each task is associated with exactly one CLOSid at
a time, this patch also adds a closid field to task_struct to track it.
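
For illustration only, below is a minimal userspace sketch of the
allocation and reference-counting scheme the patch introduces. The names
mirror the patch, but MAX_CLOSID, the demo values in main() and the
folding of the closmap bitmap into a "refcnt == 0 means free" check are
simplifications for this example; the real code sizes the table from
x86_cache_max_closid and serializes everything with rdt_group_mutex.

/* Userspace model of the CLOSid table: closid -> { l3_cbm, clos_refcnt }. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_CLOSID 16                   /* stand-in for x86_cache_max_closid */

struct clos_cbm_table {
        unsigned long l3_cbm;           /* capacity bit mask for this CLOSid */
        unsigned int clos_refcnt;       /* number of users of this CLOSid */
};

static struct clos_cbm_table cctable[MAX_CLOSID];

/* Find the first unused CLOSid and take a reference, like closid_alloc(). */
static int closid_alloc(uint32_t *closid)
{
        for (uint32_t id = 0; id < MAX_CLOSID; id++) {
                if (!cctable[id].clos_refcnt) {
                        cctable[id].clos_refcnt = 1;
                        *closid = id;
                        return 0;
                }
        }
        return -ENOSPC;                 /* every CLOSid already has users */
}

/* Drop one reference; the last put clears the CBM, like closid_put(). */
static void closid_put(uint32_t closid)
{
        if (!--cctable[closid].clos_refcnt)
                cctable[closid].l3_cbm = 0;
}

int main(void)
{
        uint32_t id;

        if (closid_alloc(&id))
                return EXIT_FAILURE;
        cctable[id].l3_cbm = 0xff;      /* hypothetical 8-bit capacity mask */
        printf("closid %u -> cbm 0x%lx\n", (unsigned int)id, cctable[id].l3_cbm);
        closid_put(id);                 /* last reference: entry is recycled */
        return EXIT_SUCCESS;
}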

Signed-off-by: Vikas Shivappa <vikas.shivappa@...ux.intel.com>
Link: http://lkml.kernel.org/r/1450392376-6397-6-git-send-email-fenghua.yu@intel.com
Signed-off-by: Fenghua Yu <fenghua.yu@...el.com>
---
 arch/x86/include/asm/intel_rdt.h | 12 ++++++
 arch/x86/kernel/cpu/intel_rdt.c  | 82 +++++++++++++++++++++++++++++++++++++++-
 include/linux/sched.h            |  3 ++
 3 files changed, 95 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
new file mode 100644
index 0000000..88b7643
--- /dev/null
+++ b/arch/x86/include/asm/intel_rdt.h
@@ -0,0 +1,12 @@
+#ifndef _RDT_H_
+#define _RDT_H_
+
+#ifdef CONFIG_INTEL_RDT
+
+struct clos_cbm_table {
+	unsigned long l3_cbm;
+	unsigned int clos_refcnt;
+};
+
+#endif
+#endif
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index f49e970..d79213a 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -24,17 +24,95 @@
 
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <asm/intel_rdt.h>
+
+/*
+ * cctable maintains 1:1 mapping between CLOSid and cache bitmask.
+ */
+static struct clos_cbm_table *cctable;
+/*
+ * closid availability bit map.
+ */
+unsigned long *closmap;
+static DEFINE_MUTEX(rdt_group_mutex);
+
+static inline void closid_get(u32 closid)
+{
+	struct clos_cbm_table *cct = &cctable[closid];
+
+	lockdep_assert_held(&rdt_group_mutex);
+
+	cct->clos_refcnt++;
+}
+
+static int closid_alloc(u32 *closid)
+{
+	u32 maxid;
+	u32 id;
+
+	lockdep_assert_held(&rdt_group_mutex);
+
+	maxid = boot_cpu_data.x86_cache_max_closid;
+	id = find_first_zero_bit(closmap, maxid);
+	if (id == maxid)
+		return -ENOSPC;
+
+	set_bit(id, closmap);
+	closid_get(id);
+	*closid = id;
+
+	return 0;
+}
+
+static inline void closid_free(u32 closid)
+{
+	clear_bit(closid, closmap);
+	cctable[closid].l3_cbm = 0;
+}
+
+static void closid_put(u32 closid)
+{
+	struct clos_cbm_table *cct = &cctable[closid];
+
+	lockdep_assert_held(&rdt_group_mutex);
+	if (WARN_ON(!cct->clos_refcnt))
+		return;
+
+	if (!--cct->clos_refcnt)
+		closid_free(closid);
+}
 
 static int __init intel_rdt_late_init(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
+	u32 maxid, max_cbm_len;
+	int err = 0, size;
 
 	if (!cpu_has(c, X86_FEATURE_CAT_L3))
 		return -ENODEV;
 
-	pr_info("Intel cache allocation detected\n");
+	maxid = c->x86_cache_max_closid;
+	max_cbm_len = c->x86_cache_max_cbm_len;
 
-	return 0;
+	size = maxid * sizeof(struct clos_cbm_table);
+	cctable = kzalloc(size, GFP_KERNEL);
+	if (!cctable) {
+		err = -ENOMEM;
+		goto out_err;
+	}
+
+	size = BITS_TO_LONGS(maxid) * sizeof(long);
+	closmap = kzalloc(size, GFP_KERNEL);
+	if (!closmap) {
+		kfree(cctable);
+		err = -ENOMEM;
+		goto out_err;
+	}
+
+	pr_info("Intel cache allocation enabled\n");
+out_err:
+
+	return err;
 }
 
 late_initcall(intel_rdt_late_init);
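
As a sanity check on the closmap sizing above, BITS_TO_LONGS() rounds the
CLOSid count up to whole unsigned longs; the snippet below reproduces the
arithmetic in userspace (maxid = 16 is a hypothetical value, and
BITS_PER_LONG is 64 on an x86_64 kernel):

#include <stdio.h>

#define BITS_PER_LONG    (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
        unsigned int maxid = 16;        /* hypothetical x86_cache_max_closid */

        /* 16 CLOSids fit in a single 8-byte long on a 64-bit build. */
        printf("closmap: %zu bytes\n", BITS_TO_LONGS(maxid) * sizeof(long));
        return 0;
}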
diff --git a/include/linux/sched.h b/include/linux/sched.h
index edad7a4..0a6db46 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1668,6 +1668,9 @@ struct task_struct {
 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
 	struct list_head cg_list;
 #endif
+#ifdef CONFIG_INTEL_RDT
+	u32 closid;
+#endif
 #ifdef CONFIG_FUTEX
 	struct robust_list_head __user *robust_list;
 #ifdef CONFIG_COMPAT
--
