Message-ID: <20150603085805.GE4403@pd.tnic>
Date: Wed, 3 Jun 2015 10:58:05 +0200
From: Borislav Petkov <bp@...en8.de>
To: Aravind Gopalakrishnan <Aravind.Gopalakrishnan@....com>
Cc: dougthompson@...ssion.com, mchehab@....samsung.com,
linux-edac@...r.kernel.org, linux-kernel@...r.kernel.org,
x86@...nel.org, Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>, Borislav Petkov <bp@...e.de>,
Jacob Shin <jacob.w.shin@...il.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Andy Lutomirski <luto@...nel.org>,
Paolo Bonzini <pbonzini@...hat.com>
Subject: Re: [PATCH V2 7/9] x86, amd: Factor out number of nodes calculation
On Tue, Jun 02, 2015 at 03:36:00PM -0500, Aravind Gopalakrishnan wrote:
> Factor the number-of-nodes calculation out of amd_get_topology()
> and save the value in a static variable.
>
> A later patch will introduce an accessor for this value so we
> can use the information elsewhere in EDAC. The usage will be
> included in a future patch too.
>
> While at it, remove the X86_HT #ifdefs around the code block for
> amd_get_topology() and its caller amd_detect_cmp(). Since
> CONFIG_X86_HT defaults to Y, this code is always built-in.
> Besides, amd_get_topology() extracts the necessary info from
> cpuid_[eax|ebx](0x8000001e) for platforms that are not 'HT'
...
> +static void amd_set_num_nodes(void)
> +{
> +	if (cpu_has_topoext) {
> +		u32 ecx;
> +
> +		ecx = cpuid_ecx(0x8000001e);
> +		nodes_per_processor = ((ecx >> 8) & 7) + 1;
> +	} else if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
> +		u64 value;
> +
> +		rdmsrl(MSR_FAM10H_NODE_ID, value);
> +		nodes_per_processor = ((value >> 3) & 7) + 1;
> +	}
> +}
> +
>  /*
>   * Fixup core topology information for
>   * (1) AMD multi-node processors
>   *     Assumption: Number of cores in each internal node is the same.
>   * (2) AMD processors supporting compute units
>   */
> -#ifdef CONFIG_X86_HT
>  static void amd_get_topology(struct cpuinfo_x86 *c)
>  {
> -	u32 nodes, cores_per_cu = 1;
> +	u32 cores_per_cu = 1;
>  	u8 node_id;
>  	int cpu = smp_processor_id();
>  
> +	amd_set_num_nodes();
When I said you like to complicate stuff, I wasn't joking. :-)

See below for what I actually meant. The diff is on top of the X86_HT
removal patch from today:
---
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 5bd3a99dc20b..bdca795743e8 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -19,6 +19,8 @@
 
 #include "cpu.h"
 
+static u32 nodes_per_processor = 1;
+
 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
 {
 	u32 gprs[8] = { 0 };
@@ -291,7 +293,7 @@ static int nearby_node(int apicid)
 #ifdef CONFIG_SMP
 static void amd_get_topology(struct cpuinfo_x86 *c)
 {
-	u32 nodes, cores_per_cu = 1;
+	u32 cores_per_cu = 1;
 	u8 node_id;
 	int cpu = smp_processor_id();
 
@@ -300,7 +302,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 		u32 eax, ebx, ecx, edx;
 
 		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
-		nodes = ((ecx >> 8) & 7) + 1;
+		nodes_per_processor = ((ecx >> 8) & 7) + 1;
 		node_id = ecx & 7;
 
 		/* get compute unit information */
@@ -311,18 +313,18 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 		u64 value;
 
 		rdmsrl(MSR_FAM10H_NODE_ID, value);
-		nodes = ((value >> 3) & 7) + 1;
+		nodes_per_processor = ((value >> 3) & 7) + 1;
 		node_id = value & 7;
 	} else
 		return;
 
 	/* fixup multi-node processor information */
-	if (nodes > 1) {
+	if (nodes_per_processor > 1) {
 		u32 cores_per_node;
 		u32 cus_per_node;
 
 		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
-		cores_per_node = c->x86_max_cores / nodes;
+		cores_per_node = c->x86_max_cores / nodes_per_processor;
 		cus_per_node = cores_per_node / cores_per_cu;
 
 		/* store NodeID, use llc_shared_map to store sibling info */
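
For reference, both versions decode the same hardware bitfields: CPUID
leaf 0x8000001e reports NodesPerProcessor - 1 in ECX[10:8] on topoext
systems, and MSR 0xc001100c (MSR_FAM10H_NODE_ID) carries the same value
in bits [5:3] on older parts. Here is a minimal user-space sketch of
that decoding - not part of either patch, and it assumes a GCC
toolchain with <cpuid.h> plus, for the MSR fallback, root and the msr
driver (modprobe msr):

/*
 * nodes.c - print nodes_per_processor the way amd_get_topology()
 * derives it. Sketch only: checking the max extended CPUID leaf via
 * __get_cpuid() is a simplification; the kernel gates the first path
 * on the TOPOEXT feature bit instead.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <cpuid.h>

#define MSR_FAM10H_NODE_ID	0xc001100c

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	uint64_t value;
	int fd;

	/* CPUID 0x8000001e: ECX[10:8] = NodesPerProcessor - 1 */
	if (__get_cpuid(0x8000001e, &eax, &ebx, &ecx, &edx)) {
		printf("nodes_per_processor: %u\n", ((ecx >> 8) & 7) + 1);
		return 0;
	}

	/* Fallback: MSR_FAM10H_NODE_ID bits [5:3] = NodesPerProcessor - 1 */
	fd = open("/dev/cpu/0/msr", O_RDONLY);
	if (fd < 0)
		return 1;

	if (pread(fd, &value, sizeof(value), MSR_FAM10H_NODE_ID) !=
	    sizeof(value)) {
		close(fd);
		return 1;
	}
	close(fd);

	printf("nodes_per_processor: %u\n",
	       (unsigned int)(((value >> 3) & 7) + 1));
	return 0;
}

Either way, the "+ 1" matters: the fields encode the node count minus
one, so a single-node part still reports 1.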
--
Regards/Gruss,
Boris.
ECO tip #101: Trim your mails when you reply.