Message-ID: <tip-3edcf2ff7ae50d1096030fab9a1bafb421e07d4c@git.kernel.org>
Date:	Wed, 4 May 2016 00:19:36 -0700
From:	tip-bot for Mike Travis <tipbot@...or.com>
To:	linux-tip-commits@...r.kernel.org
Cc:	hpa@...or.com, peterz@...radead.org, tglx@...utronix.de,
	gfk@....com, brgerst@...il.com, linux-kernel@...r.kernel.org,
	sivanich@....com, rja@....com, nzimmer@....com,
	luto@...capital.net, akpm@...ux-foundation.org, travis@....com,
	abanman@....com, dvlasenk@...hat.com, bp@...en8.de,
	len.brown@...el.com, estabrook@....com, mingo@...nel.org,
	torvalds@...ux-foundation.org
Subject: [tip:x86/platform] x86/platform/UV: Allocate common per node hub
 info structs on local node

Commit-ID:  3edcf2ff7ae50d1096030fab9a1bafb421e07d4c
Gitweb:     http://git.kernel.org/tip/3edcf2ff7ae50d1096030fab9a1bafb421e07d4c
Author:     Mike Travis <travis@....com>
AuthorDate: Fri, 29 Apr 2016 16:54:15 -0500
Committer:  Ingo Molnar <mingo@...nel.org>
CommitDate: Wed, 4 May 2016 08:48:49 +0200

x86/platform/UV: Allocate common per node hub info structs on local node

Allocate and set up per node hub info structs.  CPU 0/Node 0 hub info
is statically allocated to be accessible early in system startup.  The
remaining hub info structs are allocated in the node's local memory,
and shared among the CPUs on that node.  This leaves only the small
amount of info unique to each CPU in the per-CPU info struct.

Memory is saved by combining the common per node info fields into common
node-local structs.  In addition, since the info is read-only after
setup, it should stay in the L3 cache of the local processor socket.
This should therefore improve the cache hit rate when a group of CPUs
on a node are all interrupted for a common task.

Tested-by: John Estabrook <estabrook@....com>
Tested-by: Gary Kroening <gfk@....com>
Tested-by: Nathan Zimmer <nzimmer@....com>
Signed-off-by: Mike Travis <travis@....com>
Reviewed-by: Dimitri Sivanich <sivanich@....com>
Reviewed-by: Andrew Banman <abanman@....com>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Andy Lutomirski <luto@...capital.net>
Cc: Borislav Petkov <bp@...en8.de>
Cc: Brian Gerst <brgerst@...il.com>
Cc: Denys Vlasenko <dvlasenk@...hat.com>
Cc: H. Peter Anvin <hpa@...or.com>
Cc: Len Brown <len.brown@...el.com>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Russ Anderson <rja@....com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Link: http://lkml.kernel.org/r/20160429215404.813051625@asylum.americas.sgi.com
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 arch/x86/include/asm/uv/uv_hub.h   | 36 ++++++++++++++++++++---
 arch/x86/kernel/apic/x2apic_uv_x.c | 59 ++++++++++++++++++++++++++++----------
 2 files changed, 76 insertions(+), 19 deletions(-)

diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 4a6f02a..35987d9 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -163,10 +163,6 @@ struct uv_hub_info_s {
 	unsigned char		n_val;
 };
 
-DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
-#define uv_hub_info		this_cpu_ptr(&__uv_hub_info)
-#define uv_cpu_hub_info(cpu)	(&per_cpu(__uv_hub_info, cpu))
-
 /* CPU specific info with a pointer to the hub common info struct */
 struct uv_cpu_info_s {
 	void			*p_uv_hub_info;
@@ -181,6 +177,38 @@ DECLARE_PER_CPU(struct uv_cpu_info_s, __uv_cpu_info);
 #define	uv_scir_info		(&uv_cpu_info->scir)
 #define	uv_cpu_scir_info(cpu)	(&uv_cpu_info_per(cpu)->scir)
 
+/* Node specific hub common info struct */
+extern void **__uv_hub_info_list;
+static inline struct uv_hub_info_s *uv_hub_info_list(int node)
+{
+	return (struct uv_hub_info_s *)__uv_hub_info_list[node];
+}
+
+static inline struct uv_hub_info_s *_uv_hub_info(void)
+{
+	return (struct uv_hub_info_s *)uv_cpu_info->p_uv_hub_info;
+}
+#define	uv_hub_info	_uv_hub_info()
+
+static inline struct uv_hub_info_s *uv_cpu_hub_info(int cpu)
+{
+	return (struct uv_hub_info_s *)uv_cpu_info_per(cpu)->p_uv_hub_info;
+}
+
+#define	UV_HUB_INFO_VERSION	0x7150
+extern int uv_hub_info_version(void);
+static inline int uv_hub_info_check(int version)
+{
+	if (uv_hub_info_version() == version)
+		return 0;
+
+	pr_crit("UV: uv_hub_info version(%x) mismatch, expecting(%x)\n",
+		uv_hub_info_version(), version);
+
+	BUG();	/* Catastrophic - cannot continue on unknown UV system */
+}
+#define	_uv_hub_info_check()	uv_hub_info_check(UV_HUB_INFO_VERSION)
+
 /*
  * HUB revision ranges for each UV HUB architecture.
  * This is a software convention - NOT the hardware revision numbers in
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 69aa2a9..35c9610 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -54,6 +54,7 @@ unsigned int uv_apicid_hibits;
 EXPORT_SYMBOL_GPL(uv_apicid_hibits);
 
 static struct apic apic_x2apic_uv_x;
+static struct uv_hub_info_s uv_hub_info_node0;
 
 /* Set this to use hardware error handler instead of kernel panic */
 static int disable_uv_undefined_panic = 1;
@@ -165,6 +166,9 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	if (strncmp(oem_id, "SGI", 3) != 0)
 		return 0;
 
+	/* Setup early hub type field in uv_hub_info for Node 0 */
+	uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0;
+
 	/*
 	 * Determine UV arch type.
 	 *   SGI: UV100/1000
@@ -228,8 +232,8 @@ int is_uv_system(void)
 }
 EXPORT_SYMBOL_GPL(is_uv_system);
 
-DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
-EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
+void **__uv_hub_info_list;
+EXPORT_SYMBOL_GPL(__uv_hub_info_list);
 
 DEFINE_PER_CPU(struct uv_cpu_info_s, __uv_cpu_info);
 EXPORT_PER_CPU_SYMBOL_GPL(__uv_cpu_info);
@@ -249,6 +253,12 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
 unsigned long sn_rtc_cycles_per_second;
 EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 
+extern int uv_hub_info_version(void)
+{
+	return UV_HUB_INFO_VERSION;
+}
+EXPORT_SYMBOL(uv_hub_info_version);
+
 static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 {
 	unsigned long val;
@@ -988,9 +998,15 @@ void __init uv_system_init(void)
 
 	uv_init_hub_info(&hub_info);
 
-	for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
-		uv_possible_blades +=
-		  hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
+	pr_info("UV: NODE_PRESENT_DEPTH = %d\n", UVH_NODE_PRESENT_TABLE_DEPTH);
+	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
+		unsigned long np;
+
+		np = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
+		if (np)
+			pr_info("UV: NODE_PRESENT(%d) = 0x%016lx\n", i, np);
+		uv_possible_blades += hweight64(np);
+	}
 
 	/* uv_num_possible_blades() is really the hub count */
 	pr_info("UV: Found %d hubs, %d nodes, %d cpus\n",
@@ -1016,6 +1032,10 @@ void __init uv_system_init(void)
 	BUG_ON(!uv_cpu_to_blade);
 	memset(uv_cpu_to_blade, 255, bytes);
 
+	bytes = sizeof(void *) * uv_num_possible_blades();
+	__uv_hub_info_list = kzalloc(bytes, GFP_KERNEL);
+	BUG_ON(!__uv_hub_info_list);
+
 	blade = 0;
 	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
 		unsigned long present =
@@ -1040,28 +1060,37 @@ void __init uv_system_init(void)
 	uv_rtc_init();
 
 	for_each_present_cpu(cpu) {
+		struct uv_hub_info_s *new_hub = NULL;
 		int apicid = per_cpu(x86_cpu_to_apicid, cpu);
 		int nodeid = cpu_to_node(cpu);
-		int lcpu;
 
-		*uv_cpu_hub_info(cpu) = hub_info;	/* common hub values */
-		pnode = uv_apicid_to_pnode(apicid);
-		blade = boot_pnode_to_blade(pnode);
-		lcpu = uv_blade_info[blade].nr_possible_cpus;
-		uv_blade_info[blade].nr_possible_cpus++;
+		/* Allocate new per hub info list */
+		if (uv_hub_info_list(nodeid) == NULL) {
+			if (cpu == 0)
+				__uv_hub_info_list[0] = &uv_hub_info_node0;
+			else
+				__uv_hub_info_list[nodeid] =
+					kzalloc_node(bytes, GFP_KERNEL, nodeid);
+
+			new_hub = uv_hub_info_list(nodeid);
+			BUG_ON(!new_hub);
+			*new_hub = hub_info;
+			blade = boot_pnode_to_blade(new_hub->pnode);
+			new_hub->pnode = uv_apicid_to_pnode(apicid);
+			new_hub->numa_blade_id = blade;
+		}
 
 		/* Any node on the blade, else will contain -1. */
 		uv_blade_info[blade].memory_nid = nodeid;
 
-		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
-		uv_cpu_hub_info(cpu)->pnode = pnode;
 		uv_node_to_blade[nodeid] = blade;
 		uv_cpu_to_blade[cpu] = blade;
 
 		/* Initialize per cpu info list */
-		uv_cpu_info_per(cpu)->p_uv_hub_info = uv_cpu_hub_info(cpu);
-		uv_cpu_info_per(cpu)->blade_cpu_id = lcpu;
+		uv_cpu_info_per(cpu)->p_uv_hub_info = uv_hub_info_list(nodeid);
 		uv_cpu_info_per(cpu)->scir.offset = uv_scir_offset(apicid);
+		uv_cpu_info_per(cpu)->blade_cpu_id =
+			uv_blade_info[blade].nr_possible_cpus++;
 	}
 
 	/* Add blade/pnode info for nodes without cpus */

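
For readers who want the shape of the change without walking the whole diff,
the sketch below illustrates the allocation pattern the changelog describes:
one common hub info struct per node, placed in that node's local memory with
kzalloc_node(), a statically allocated struct for node 0 so it is usable early
in boot, and a per-CPU pointer so every CPU on a node shares its node's
struct.  This is a simplified, hedged illustration only, not the patch's code:
setup_node_hub_info() and the struct/field names are made up for the example;
the real logic lives in uv_system_init() and uv_hub.h above.

/*
 * Simplified sketch of the per-node hub info pattern.
 * Illustrative names only, not the actual UV code.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/cpumask.h>

struct hub_info {				/* common per node info, read-only after setup */
	unsigned short	pnode;
	short		numa_blade_id;
};

static struct hub_info hub_info_node0;		/* static: usable before the allocator is up */
static struct hub_info **hub_info_list;		/* one slot per node */

struct cpu_info {
	struct hub_info	*p_hub_info;		/* points at this CPU's node struct */
};
static DEFINE_PER_CPU(struct cpu_info, cpu_info);

static int __init setup_node_hub_info(const struct hub_info *tmpl, int nr_nodes)
{
	int cpu;

	hub_info_list = kcalloc(nr_nodes, sizeof(*hub_info_list), GFP_KERNEL);
	if (!hub_info_list)
		return -ENOMEM;

	for_each_present_cpu(cpu) {
		int node = cpu_to_node(cpu);

		if (!hub_info_list[node]) {
			/* node 0 uses the static struct; other nodes get node-local memory */
			if (node == 0)
				hub_info_list[node] = &hub_info_node0;
			else
				hub_info_list[node] = kzalloc_node(sizeof(struct hub_info),
								   GFP_KERNEL, node);
			if (!hub_info_list[node])
				return -ENOMEM;
			*hub_info_list[node] = *tmpl;	/* copy the common values */
		}
		/* every CPU on the node shares the same node-local struct */
		per_cpu(cpu_info, cpu).p_hub_info = hub_info_list[node];
	}
	return 0;
}

With this layout a hot path reads the shared fields through the per-CPU
pointer (e.g. per_cpu(cpu_info, cpu).p_hub_info->pnode), so the common data
is fetched from the local socket's cache rather than duplicated per CPU.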