Message-Id: <20251230075951.85252-1-tianruidong@linux.alibaba.com>
Date: Tue, 30 Dec 2025 15:59:51 +0800
From: Ruidong Tian <tianruidong@...ux.alibaba.com>
To: dan.j.williams@...el.com,
	vishal.l.verma@...el.com,
	dave.jiang@...el.com,
	tony.luck@...el.com,
	bp@...en8.de,
	linux-cxl@...r.kernel.org,
	linux-edac@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	xueshuai@...ux.alibaba.com
Cc: Ruidong Tian <tianruidong@...ux.alibaba.com>
Subject: [RFC PATCH] device/dax: Allow MCE recovery when accessing PFN metadata

Both fsdax and devdax modes require significant space to store Page Frame
Number (PFN) metadata (struct page). For a 1TiB namespace, approximately
17.18GiB of metadata is needed[0]. As namespace sizes scale, hardware
memory errors within this metadata region become increasingly frequent.
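
For reference, that figure is a straightforward back-of-the-envelope
calculation (assuming 4 KiB pages and a 64-byte struct page, the common
x86-64 values; nothing in this patch depends on them):

  1 TiB / 4 KiB per page      = 268,435,456 pages
  268,435,456 pages * 64 B    = 2^34 bytes = 16 GiB (~17.18 * 10^9 B)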

Currently, the kernel treats any access to corrupted PFN metadata as an
unrecoverable event, leading to an immediate system panic. However, in
DAX scenarios (e.g., CXL-attached memory), the impact of metadata
corruption is logically confined to the physical device backing that
specific memory range.

Instead of panicking globally, the kernel can localize the failure. By
allowing the affected DAX memory range to be offlined or the specific
device to be decommissioned, we can limit the blast radius of hardware
errors, so that other processes can migrate or exit gracefully rather
than being terminated by a system-wide crash. To that end,
dax_set_mapping() now probes each struct page and folio with a
machine-check-safe copy before dereferencing it, and the fault handlers
return VM_FAULT_SIGBUS when poison is detected.

Reproduction and testing (a sketch of step 2 follows the list):
1. Inject an error into the PFN metadata
2. mmap the device and read from it
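
Step 2 could look like the following minimal sketch; the device path
/dev/dax0.0 and the 2 MiB mapping size are illustrative assumptions
(substitute the actual devdax node and its alignment), and step 1 is
platform specific (e.g. ACPI EINJ), so it is not shown:

	/* repro.c: touch a devdax page whose PFN metadata is poisoned */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = 1UL << 21;	/* assumed 2 MiB device alignment */
		int fd = open("/dev/dax0.0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/* First touch runs the fault path (dax_set_mapping) */
		volatile char c = *(volatile char *)p;
		(void)c;

		munmap(p, len);
		close(fd);
		return 0;
	}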

Before applying this patch, the kernel panics:
  CPU 120: Machine Check Exception: f Bank 1: bd80000000100134
  RIP 10:<ffffffff8598300e> {dax_set_mapping.isra.0+0xce/0x140}
  TSC ee24b9e2d5 ADDR b213398000 MISC 86 PPIN 6deeb6484732971d
  PROCESSOR 0:a06d1 TIME 1765336050 SOCKET 0 APIC b1 microcode 10003f3
  Run the above through 'mcelog --ascii'
  Machine check: Data load in unrecoverable area of kernel
Kernel panic - not syncing: Fatal local machine check

After applying this patch:
The user application receives SIGBUS and the system stays alive.
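
For testing convenience, the signal can be observed with an ordinary
SIGBUS handler; this is a generic sketch (the helper names are
illustrative, and the exact siginfo contents beyond si_addr are not
something this patch specifies):

	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static void bus_handler(int sig, siginfo_t *info, void *ctx)
	{
		(void)sig;
		(void)ctx;
		/* printf is not async-signal-safe; fine for a test tool */
		printf("caught SIGBUS at address %p\n", info->si_addr);
		_exit(1);
	}

	static void install_bus_handler(void)
	{
		struct sigaction sa;

		memset(&sa, 0, sizeof(sa));
		sa.sa_sigaction = bus_handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGBUS, &sa, NULL);
	}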

[0]: https://docs.pmem.io/ndctl-user-guide/managing-namespaces#fsdax-and-devdax-capacity-considerations

Signed-off-by: Ruidong Tian <tianruidong@...ux.alibaba.com>
---
 drivers/dax/dax-private.h | 26 ++++++++++++++++++++++++++
 drivers/dax/device.c      | 20 ++++++++++++++++----
 2 files changed, 42 insertions(+), 4 deletions(-)

diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index 0867115aeef2..84325963fa3d 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -129,4 +129,30 @@ static inline bool dax_align_valid(unsigned long align)
 	return align == PAGE_SIZE;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#include <linux/uaccess.h>
+
+#ifndef copy_mc_to_kernel
+static inline int dax_test_page_mc(const struct page *page)
+{
+	return 0;
+}
+static inline int dax_test_folio_mc(const struct folio *folio)
+{
+	return 0;
+}
+#else
+static inline int dax_test_page_mc(const struct page *page)
+{
+	struct page _p;
+
+	return copy_mc_to_kernel(&_p, page, sizeof(struct page));
+}
+static inline int dax_test_folio_mc(const struct folio *folio)
+{
+	struct folio _f;
+
+	return copy_mc_to_kernel(&_f, folio, sizeof(struct folio));
+}
+#endif
 #endif
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 22999a402e02..a7f2217b9b62 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -80,7 +80,7 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
 	return -1;
 }
 
-static void dax_set_mapping(struct vm_fault *vmf, unsigned long pfn,
+static int dax_set_mapping(struct vm_fault *vmf, unsigned long pfn,
 			      unsigned long fault_size)
 {
 	unsigned long i, nr_pages = fault_size / PAGE_SIZE;
@@ -95,6 +95,13 @@ static void dax_set_mapping(struct vm_fault *vmf, unsigned long pfn,
 	pgoff = linear_page_index(vmf->vma,
 			ALIGN_DOWN(vmf->address, fault_size));
 
+	for (i = 0; i < nr_pages; i++) {
+		struct page *p = pfn_to_page(pfn + i);
+
+		if (dax_test_page_mc(p) || dax_test_folio_mc(page_folio(p)))
+			return -EFAULT;
+	}
+
 	for (i = 0; i < nr_pages; i++) {
 		struct folio *folio = pfn_folio(pfn + i);
 
@@ -104,6 +111,8 @@ static void dax_set_mapping(struct vm_fault *vmf, unsigned long pfn,
 		folio->mapping = filp->f_mapping;
 		folio->index = pgoff + i;
 	}
+
+	return 0;
 }
 
 static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
@@ -134,7 +143,8 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
 
 	pfn = PHYS_PFN(phys);
 
-	dax_set_mapping(vmf, pfn, fault_size);
+	if (dax_set_mapping(vmf, pfn, fault_size))
+		return VM_FAULT_SIGBUS;
 
 	return vmf_insert_page_mkwrite(vmf, pfn_to_page(pfn),
 					vmf->flags & FAULT_FLAG_WRITE);
@@ -178,7 +188,8 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
 
 	pfn = PHYS_PFN(phys);
 
-	dax_set_mapping(vmf, pfn, fault_size);
+	if (dax_set_mapping(vmf, pfn, fault_size))
+		return VM_FAULT_SIGBUS;
 
 	return vmf_insert_folio_pmd(vmf, page_folio(pfn_to_page(pfn)),
 				vmf->flags & FAULT_FLAG_WRITE);
@@ -224,7 +235,8 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
 
 	pfn = PHYS_PFN(phys);
 
-	dax_set_mapping(vmf, pfn, fault_size);
+	if (dax_set_mapping(vmf, pfn, fault_size))
+		return VM_FAULT_SIGBUS;
 
 	return vmf_insert_folio_pud(vmf, page_folio(pfn_to_page(pfn)),
 				vmf->flags & FAULT_FLAG_WRITE);
-- 
2.33.1

