Message-Id: <1516813025-10794-6-git-send-email-dwmw@amazon.co.uk>
Date: Wed, 24 Jan 2018 16:57:04 +0000
From: David Woodhouse <dwmw@...zon.co.uk>
To: arjan@...ux.intel.com, tglx@...utronix.de, karahmed@...zon.de,
x86@...nel.org, linux-kernel@...r.kernel.org,
tim.c.chen@...ux.intel.com, bp@...en8.de, peterz@...radead.org,
pbonzini@...hat.com, ak@...ux.intel.com,
torvalds@...ux-foundation.org, gregkh@...ux-foundation.org,
dave.hansen@...el.com, gnomes@...rguk.ukuu.org.uk
Subject: [PATCH v3 5/6] x86/pti: Do not enable PTI on processors which are not vulnerable to Meltdown

Some old Atoms, anything in family 5 or 4, and newer CPUs which
advertise the IA32_ARCH_CAPABILITIES MSR with the RDCL_NO bit set,
are not vulnerable to Meltdown.

Roll the AMD exemption into the x86_match_cpu() table too.

Based on suggestions from Dave Hansen and Alan Cox.

Signed-off-by: David Woodhouse <dwmw@...zon.co.uk>
---
arch/x86/kernel/cpu/common.c | 34 ++++++++++++++++++++++++++++++++--
1 file changed, 32 insertions(+), 2 deletions(-)
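
A note for reviewers on the match table: x86_match_cpu() walks the
NULL-terminated table and returns the first entry whose non-wildcard
fields all match the boot CPU. The wildcard values (X86_VENDOR_ANY,
X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ANY) match anything, and the
family/model/feature wildcards happen to be zero, which is why the bare
{ X86_VENDOR_AMD } entry covers every AMD part and { X86_VENDOR_ANY, 5 }
covers all of family 5. ARCH_CAP_RDCL_NO is bit 0 of
IA32_ARCH_CAPABILITIES (MSR 0x10a), and the MSR is only read when CPUID
enumerates it via X86_FEATURE_ARCH_CAPABILITIES. Roughly, the matching
behaves like this sketch (illustrative only, not part of the patch; the
real helper lives in arch/x86/kernel/cpu/match.c, and the function name
here is made up for the sketch):

	/*
	 * Illustrative sketch of x86_match_cpu() semantics; the real
	 * implementation is in arch/x86/kernel/cpu/match.c.
	 */
	static const struct x86_cpu_id *
	match_cpu_sketch(const struct x86_cpu_id *match)
	{
		struct cpuinfo_x86 *c = &boot_cpu_data;
		const struct x86_cpu_id *m;

		/* The all-zero terminator {} ends the walk */
		for (m = match; m->vendor | m->family | m->model | m->feature; m++) {
			if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor)
				continue;
			if (m->family != X86_FAMILY_ANY && c->x86 != m->family)
				continue;
			if (m->model != X86_MODEL_ANY && c->x86_model != m->model)
				continue;
			if (m->feature != X86_FEATURE_ANY && !cpu_has(c, m->feature))
				continue;
			return m;
		}
		return NULL;
	}
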
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e5d66e9..3bc8a1f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -47,6 +47,8 @@
 #include <asm/pat.h>
 #include <asm/microcode.h>
 #include <asm/microcode_intel.h>
+#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/uv/uv.h>
@@ -853,6 +855,35 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #endif
 }
 
+static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
+	{ X86_VENDOR_AMD },
+	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_CEDARVIEW,	X86_FEATURE_ANY },
+	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_CLOVERVIEW,	X86_FEATURE_ANY },
+	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_LINCROFT,	X86_FEATURE_ANY },
+	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_PENWELL,	X86_FEATURE_ANY },
+	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_PINEVIEW,	X86_FEATURE_ANY },
+	{ X86_VENDOR_ANY,	5 },
+	{ X86_VENDOR_ANY,	4 },
+	{}
+};
+
+static bool __init early_cpu_vulnerable_meltdown(struct cpuinfo_x86 *c)
+{
+	u64 ia32_cap = 0;
+
+	if (x86_match_cpu(cpu_no_meltdown))
+		return false;
+
+	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+	/* Rogue Data Cache Load? No! */
+	if (ia32_cap & ARCH_CAP_RDCL_NO)
+		return false;
+
+	return true;
+}
+
 /*
  * Do minimum CPU detection early.
  * Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -900,9 +931,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 
-	if (c->x86_vendor != X86_VENDOR_AMD)
+	if (early_cpu_vulnerable_meltdown(c))
 		setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
-
 	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
 	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
 
--
2.7.4
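
For context on how this flag is consumed: PTI auto-selection keys off
X86_BUG_CPU_MELTDOWN, so clearing the bug here is what actually keeps
page-table isolation off on exempt hardware. Roughly, paraphrased from
pti_check_boottime_disable() in arch/x86/mm/pti.c (not part of this
patch):

	autosel:
		if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
			return;		/* not vulnerable: leave PTI off */
	enable:
		setup_force_cpu_cap(X86_FEATURE_PTI);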