lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Mon, 23 Mar 2015 10:42:52 -0500
From:	Aravind Gopalakrishnan <Aravind.Gopalakrishnan@....com>
To:	<tglx@...utronix.de>, <mingo@...hat.com>, <hpa@...or.com>,
	<tony.luck@...el.com>, <bp@...en8.de>, <slaoub@...il.com>,
	<luto@...capital.net>, <x86@...nel.org>,
	<linux-kernel@...r.kernel.org>, <linux-edac@...r.kernel.org>
CC:	Aravind Gopalakrishnan <Aravind.Gopalakrishnan@....com>,
	"Aravind Gopalakrishnan" <aravind.gopalakrishnan@....com>
Subject: [PATCH V3 1/2] x86, mce, severities: Add AMD severities function

Add a severities function that caters to AMD processors.
This allows us to do some vendor specific work within the
function if necessary.

Also, introduce a vendor flag bitfield which contains vendor
specific flags. The severities code uses this to define error
scope based on the presence of the flags field.

This is based on work by Borislav Petkov.

Testing details:
Tested the patch for any regressions on
Fam10h, Model 9h (Greyhound)
Fam15h: Models 0h-0fh (Orochi), 30h-3fh (Kaveri) and 60h-6fh (Carrizo),
Fam16h Model 00h-0fh (Kabini)

Signed-off-by: Aravind Gopalakrishnan <aravind.gopalakrishnan@....com>
---
Changes from V2:
 - Rebase on top of latest tip
 - Tested patch on more systems and updated commit message appropriately

Changes from V1:
 - Test mce_flags.overflow_recov once instead of multiple times

 arch/x86/include/asm/mce.h                |  6 ++++
 arch/x86/kernel/cpu/mcheck/mce-severity.c | 53 +++++++++++++++++++++++++++++++
 arch/x86/kernel/cpu/mcheck/mce.c          |  9 ++++++
 3 files changed, 68 insertions(+)

diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index fd38a23..b574fbf 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -116,6 +116,12 @@ struct mca_config {
 	u32 rip_msr;
 };
 
+/* Vendor-specific MCA capability flags, populated in __mcheck_cpu_init_vendor() */
+struct mce_vendor_flags {
+	__u64		overflow_recov	: 1, /* cpuid_ebx(80000007) bit 0: MCA overflow recovery */
+			__reserved_0	: 63; /* pad the bitfield out to 64 bits */
+};
+extern struct mce_vendor_flags mce_flags;
+
 extern struct mca_config mca_cfg;
 extern void mce_register_decode_chain(struct notifier_block *nb);
 extern void mce_unregister_decode_chain(struct notifier_block *nb);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 8bb4330..4f8f87d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -186,12 +186,65 @@ static int error_context(struct mce *m)
 	return ((m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
 }
 
+/* keeping mce_severity_amd in sync with AMD error scope heirarchy table */
+static int mce_severity_amd(struct mce *m, enum context ctx)
+{
+	enum context ctx = error_context(m);
+	/* Processor Context Corrupt, no need to fumble too much, die! */
+	if (m->status & MCI_STATUS_PCC)
+		return MCE_PANIC_SEVERITY;
+
+	if (m->status & MCI_STATUS_UC) {
+		/*
+		 * On older systems, where overflow_recov flag is not
+		 * present, we should simply PANIC if Overflow occurs.
+		 * If overflow_recov flag set, then SW can try
+		 * to at least kill process to salvage systen operation.
+		 */
+
+		if (mce_flags.overflow_recov) {
+			/* software can try to contain */
+			if (!(m->mcgstatus & MCG_STATUS_RIPV))
+				if (ctx == IN_KERNEL)
+					return MCE_PANIC_SEVERITY;
+
+				/* kill current process */
+				return MCE_AR_SEVERITY;
+		} else {
+			/* at least one error was not logged */
+			if (m->status & MCI_STATUS_OVER)
+				return MCE_PANIC_SEVERITY;
+		}
+		/*
+		 * any other case, return MCE_UC_SEVERITY so that
+		 * we log the error and exit #MC handler.
+		 */
+		return MCE_UC_SEVERITY;
+	}
+
+	/*
+	 * deferred error: poll handler catches these and adds to mce_ring
+	 * so memory-failure can take recovery actions.
+	 */
+	if (m->status & MCI_STATUS_DEFERRED)
+		return MCE_DEFERRED_SEVERITY;
+
+	/*
+	 * corrected error: poll handler catches these and passes
+	 * responsibility of decoding the error to EDAC
+	 */
+	return MCE_KEEP_SEVERITY;
+}
+
 int mce_severity(struct mce *m, int tolerant, char **msg, bool is_excp)
 {
 	enum exception excp = (is_excp ? EXCP_CONTEXT : NO_EXCP);
 	enum context ctx = error_context(m);
 	struct severity *s;
 
+	if (m->cpuvendor == X86_VENDOR_AMD)
+		return mce_severity_amd(m, ctx);
+
 	for (s = severities;; s++) {
 		if ((m->status & s->mask) != s->result)
 			continue;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 3cc6793..03c7e0a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -65,6 +65,7 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
 DEFINE_PER_CPU(unsigned, mce_exception_count);
 
 struct mce_bank *mce_banks __read_mostly;
+struct mce_vendor_flags mce_flags __read_mostly;
 
 struct mca_config mca_cfg __read_mostly = {
 	.bootlog  = -1,
@@ -1533,6 +1534,13 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
 			mce_banks[0].ctl = 0;
 
 		/*
+		 * overflow_recov is supported for F15h Models 00h-0fh
+		 * even though we don't have cpuid bit for this
+		 */
+		if (c->x86 == 0x15 && c->x86_model <= 0xf)
+			mce_flags.overflow_recov = 1;
+
+		/*
 		 * Turn off MC4_MISC thresholding banks on those models since
 		 * they're not supported there.
 		 */
@@ -1631,6 +1639,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
 		break;
 	case X86_VENDOR_AMD:
 		mce_amd_feature_init(c);
+		mce_flags.overflow_recov = cpuid_ebx(0x80000007) & 0x1;
 		break;
 	default:
 		break;
-- 
1.9.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ