Message-Id: <1504722964-9170-1-git-send-email-prarit@redhat.com>
Date: Wed, 6 Sep 2017 14:36:04 -0400
From: Prarit Bhargava <prarit@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: Prarit Bhargava <prarit@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>, x86@...nel.org,
Jean Delvare <jdelvare@...e.com>, Borislav Petkov <bp@...e.de>,
Andy Lutomirski <luto@...nel.org>,
Tim Chen <tim.c.chen@...ux.intel.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Michael Ellerman <mpe@...erman.id.au>,
Ville Syrjälä <ville.syrjala@...ux.intel.com>
Subject: [PATCH] x86/smpboot: Fix __max_logical_packages estimate
A system booted with a small number of cores enabled per package
panics because the estimate of __max_logical_packages is too low.
This occurs when the total number of active cores across all packages
is less than the maximum core count for a single package.
For example, on a 4-package system with 20 cores/package where only
4 cores are enabled on each package, the value of __max_logical_packages
is calculated as DIV_ROUND_UP(16, 20) = 1 instead of 4.
Improve the estimate of __max_logical_packages by taking the larger of
the current estimate and the number of SMBIOS type 4 (Processor
Information) structures. If __max_logical_packages is still insufficient
during boot, fall back to the largest possible package ID.
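Paraphrasing the smpboot.c hunks below (sketch only, identifiers as in
the existing code), the resulting logic is:

	__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
	__max_logical_packages = max_t(unsigned int, __max_logical_packages,
				       dmi_processor_count);

and later, in topology_update_package_map():

	if (system_state == SYSTEM_BOOTING)
		__max_logical_packages = max_physical_pkg_id;
	else
		return -ENOSPC;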
Signed-off-by: Prarit Bhargava <prarit@...hat.com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: x86@...nel.org
Cc: Jean Delvare <jdelvare@...e.com>
Cc: Borislav Petkov <bp@...e.de>
Cc: Andy Lutomirski <luto@...nel.org>
Cc: Tim Chen <tim.c.chen@...ux.intel.com>
Cc: Prarit Bhargava <prarit@...hat.com>
Cc: Vitaly Kuznetsov <vkuznets@...hat.com>
Cc: Michael Ellerman <mpe@...erman.id.au>
Cc: "Ville Syrjälä" <ville.syrjala@...ux.intel.com>
---
arch/x86/kernel/smpboot.c | 13 ++++++++++---
drivers/firmware/dmi_scan.c | 9 +++++++++
include/linux/dmi.h | 2 ++
3 files changed, 21 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 54b9e89d4d6b..54c59f3b3d01 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -56,6 +56,7 @@
#include <linux/stackprotector.h>
#include <linux/gfp.h>
#include <linux/cpuidle.h>
+#include <linux/dmi.h>
#include <asm/acpi.h>
#include <asm/desc.h>
@@ -298,7 +299,10 @@ int topology_update_package_map(unsigned int pkg, unsigned int cpu)
if (logical_packages >= __max_logical_packages) {
pr_warn("Package %u of CPU %u exceeds BIOS package data %u.\n",
logical_packages, cpu, __max_logical_packages);
- return -ENOSPC;
+ if (system_state == SYSTEM_BOOTING)
+ __max_logical_packages = max_physical_pkg_id;
+ else
+ return -ENOSPC;
}
new = logical_packages++;
@@ -332,6 +336,8 @@ static void __init smp_init_package_map(struct cpuinfo_x86 *c, unsigned int cpu)
size_t size;
/*
+ * Estimate the number of logical packages.
+ *
* Today neither Intel nor AMD support heterogenous systems. That
* might change in the future....
*
@@ -360,6 +366,8 @@ static void __init smp_init_package_map(struct cpuinfo_x86 *c, unsigned int cpu)
}
__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
+ __max_logical_packages = max_t(unsigned int, __max_logical_packages,
+ dmi_processor_count);
logical_packages = 0;
/*
@@ -373,9 +381,8 @@ static void __init smp_init_package_map(struct cpuinfo_x86 *c, unsigned int cpu)
size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
physical_package_map = kzalloc(size, GFP_KERNEL);
- pr_info("Max logical packages: %u\n", __max_logical_packages);
-
topology_update_package_map(c->phys_proc_id, cpu);
+ pr_info("Max logical packages: %u\n", __max_logical_packages);
}
void __init smp_store_boot_cpu_info(void)
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 783041964439..177a571e6776 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -420,6 +420,12 @@ void __init dmi_memdev_walk(void)
}
}
+unsigned int dmi_processor_count;
+void __init dmi_save_processor(const struct dmi_header *dm)
+{
+ dmi_processor_count++;
+}
+
/*
* Process a DMI table entry. Right now all we care about are the BIOS
* and machine entries. For 2.5 we should pull the smbus controller info
@@ -455,6 +461,9 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
dmi_save_ident(dm, DMI_CHASSIS_SERIAL, 7);
dmi_save_ident(dm, DMI_CHASSIS_ASSET_TAG, 8);
break;
+ case 4: /* Processor Information */
+ dmi_save_processor(dm);
+ break;
case 9: /* System Slots */
dmi_save_system_slot(dm);
break;
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 9bbf21a516e4..c03f6a5d55f7 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -112,6 +112,7 @@ extern int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data);
extern bool dmi_match(enum dmi_field f, const char *str);
extern void dmi_memdev_name(u16 handle, const char **bank, const char **device);
+extern unsigned int dmi_processor_count;
#else
@@ -143,6 +144,7 @@ static inline void dmi_memdev_name(u16 handle, const char **bank,
const char **device) { }
static inline const struct dmi_system_id *
dmi_first_match(const struct dmi_system_id *list) { return NULL; }
+static unsigned int dmi_processor_count;
#endif
--
1.8.5.5