Message-ID: <1452809140-3328-6-git-send-email-Aravind.Gopalakrishnan@amd.com>
Date:	Thu, 14 Jan 2016 16:05:40 -0600
From:	Aravind Gopalakrishnan <Aravind.Gopalakrishnan@....com>
To:	<tony.luck@...el.com>, <bp@...en8.de>, <tglx@...utronix.de>,
	<mingo@...hat.com>, <hpa@...or.com>
CC:	<x86@...nel.org>, <linux-edac@...r.kernel.org>,
	<linux-kernel@...r.kernel.org>,
	Aravind Gopalakrishnan <Aravind.Gopalakrishnan@....com>
Subject: [PATCH 5/5] x86/mcheck/AMD: Set MCAX Enable bit

The OS is required to acknowledge that it is using the MCAX
register set and its associated fields by setting the 'McaXEnable'
bit in each bank's MCi_CONFIG register. If the bit is not set,
then all uncorrectable (UC) errors will cause a system panic.

So set the bit here, and also define the new MSR range for
SMCA-enabled processors in msr-index.h.
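
Each SMCA bank's register block is 16 MSRs wide, so the per-bank
accessors below stride by 0x10; for example:

  MSR_AMD64_SMCA_MCx_CONFIG(2) = 0xc0002004 + 0x10 * 2 = 0xc0002024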

Signed-off-by: Aravind Gopalakrishnan <Aravind.Gopalakrishnan@....com>
---
 arch/x86/include/asm/msr-index.h     | 23 +++++++++++++++++++++++
 arch/x86/kernel/cpu/mcheck/mce_amd.c | 12 ++++++++++++
 2 files changed, 35 insertions(+)
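
For illustration only (not part of this patch), a minimal sketch of
how the new per-bank SMCA accessors might be used to dump a bank's
extended error information. smca_dump_bank() is a hypothetical helper,
not code from this series; it assumes only rdmsr_safe() (kernel
context, <asm/msr.h> and <linux/printk.h>) and the macros added to
msr-index.h below:

	/*
	 * Hypothetical helper (illustration only): read the extended
	 * error registers of one SMCA bank via the new accessors.
	 */
	static void smca_dump_bank(unsigned int bank)
	{
		u32 low, high;

		/* MCA_IPID identifies the IP block owning this bank. */
		if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high))
			pr_info("MC%u IPID: 0x%08x%08x\n", bank, high, low);

		/* MCA_SYND holds syndrome data for the logged error. */
		if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_SYND(bank), &low, &high))
			pr_info("MC%u SYND: 0x%08x%08x\n", bank, high, low);
	}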

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index b05402e..88505f8 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -264,6 +264,29 @@
 #define MSR_IA32_MC0_CTL2		0x00000280
 #define MSR_IA32_MCx_CTL2(x)		(MSR_IA32_MC0_CTL2 + (x))
 
+/* SMCA defined MSR register set for AMD64 */
+#define MSR_AMD64_SMCA_MC0_CTL		0xc0002000
+#define MSR_AMD64_SMCA_MC0_STATUS	0xc0002001
+#define MSR_AMD64_SMCA_MC0_ADDR		0xc0002002
+#define MSR_AMD64_SMCA_MC0_MISC0	0xc0002003
+#define MSR_AMD64_SMCA_MC0_CONFIG	0xc0002004
+#define MSR_AMD64_SMCA_MC0_IPID		0xc0002005
+#define MSR_AMD64_SMCA_MC0_SYND		0xc0002006
+#define MSR_AMD64_SMCA_MC0_DESTAT	0xc0002008
+#define MSR_AMD64_SMCA_MC0_DEADDR	0xc0002009
+#define MSR_AMD64_SMCA_MC0_MISC1	0xc000200a
+
+#define MSR_AMD64_SMCA_MCx_CTL(x)	(MSR_AMD64_SMCA_MC0_CTL + 0x10*(x))
+#define MSR_AMD64_SMCA_MCx_STATUS(x)	(MSR_AMD64_SMCA_MC0_STATUS + 0x10*(x))
+#define MSR_AMD64_SMCA_MCx_ADDR(x)	(MSR_AMD64_SMCA_MC0_ADDR + 0x10*(x))
+#define MSR_AMD64_SMCA_MCx_MISC(x)	(MSR_AMD64_SMCA_MC0_MISC0 + 0x10*(x))
+#define MSR_AMD64_SMCA_MCx_CONFIG(x)	(MSR_AMD64_SMCA_MC0_CONFIG + 0x10*(x))
+#define MSR_AMD64_SMCA_MCx_IPID(x)	(MSR_AMD64_SMCA_MC0_IPID + 0x10*(x))
+#define MSR_AMD64_SMCA_MCx_SYND(x)	(MSR_AMD64_SMCA_MC0_SYND + 0x10*(x))
+#define MSR_AMD64_SMCA_MCx_DESTAT(x)	(MSR_AMD64_SMCA_MC0_DESTAT + 0x10*(x))
+#define MSR_AMD64_SMCA_MCx_DEADDR(x)	(MSR_AMD64_SMCA_MC0_DEADDR + 0x10*(x))
+#define MSR_AMD64_SMCA_MCx_MISCy(x, y)	((MSR_AMD64_SMCA_MC0_MISC1 + (y)) + (0x10*(x)))
+
 #define MSR_P6_PERFCTR0			0x000000c1
 #define MSR_P6_PERFCTR1			0x000000c2
 #define MSR_P6_EVNTSEL0			0x00000186
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 4383d75..ae6fcca 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -51,6 +51,7 @@
 
 /* SMCA settings */
 #define SMCA_THR_LVT_OFF	0xF000
+#define SMCA_MCAX_EN_OFF	0x1
 
 static const char * const th_names[] = {
 	"load_store",
@@ -316,6 +317,17 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 
 			if (mce_flags.smca) {
 				u32 smca_low = 0, smca_high = 0;
+				u32 smca_addr = 0;
+
+				/* Set MCAXEnable bit for each bank */
+				smca_addr = MSR_AMD64_SMCA_MCx_CONFIG(bank);
+				if (rdmsr_safe(smca_addr,
+					       &smca_low,
+					       &smca_high))
+					continue;
+
+				smca_high |= SMCA_MCAX_EN_OFF;
+				wrmsr(smca_addr, smca_low, smca_high);
 
 				/* Gather LVT offset for thresholding */
 				if (rdmsr_safe(MSR_CU_DEF_ERR,
-- 
2.7.0
