Message-ID: <20251120031925.87762-8-Smita.KoralahalliChannabasappa@amd.com>
Date: Thu, 20 Nov 2025 03:19:23 +0000
From: Smita Koralahalli <Smita.KoralahalliChannabasappa@....com>
To: <linux-cxl@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
	<nvdimm@...ts.linux.dev>, <linux-fsdevel@...r.kernel.org>,
	<linux-pm@...r.kernel.org>
CC: Alison Schofield <alison.schofield@...el.com>, Vishal Verma
	<vishal.l.verma@...el.com>, Ira Weiny <ira.weiny@...el.com>, Dan Williams
	<dan.j.williams@...el.com>, Jonathan Cameron <jonathan.cameron@...wei.com>,
	Yazen Ghannam <yazen.ghannam@....com>, Dave Jiang <dave.jiang@...el.com>,
	Davidlohr Bueso <dave@...olabs.net>, Matthew Wilcox <willy@...radead.org>,
	Jan Kara <jack@...e.cz>, "Rafael J . Wysocki" <rafael@...nel.org>, Len Brown
	<len.brown@...el.com>, Pavel Machek <pavel@...nel.org>, Li Ming
	<ming.li@...omail.com>, Jeff Johnson <jeff.johnson@....qualcomm.com>, "Ying
 Huang" <huang.ying.caritas@...il.com>, Yao Xingtao <yaoxt.fnst@...itsu.com>,
	Peter Zijlstra <peterz@...radead.org>, Greg KH <gregkh@...uxfoundation.org>,
	Nathan Fontenot <nathan.fontenot@....com>, Terry Bowman
	<terry.bowman@....com>, Robert Richter <rrichter@....com>, Benjamin Cheatham
	<benjamin.cheatham@....com>, Zhijian Li <lizhijian@...itsu.com>, "Borislav
 Petkov" <bp@...en8.de>, Ard Biesheuvel <ardb@...nel.org>
Subject: [PATCH v4 7/9] cxl/region, dax/hmem: Register cxl_dax only when CXL owns Soft Reserved span

Register DAX from the HMEM path only after determining that CXL owns the
Soft Reserved range. This avoids onlining memory under CXL before ownership
is finalized and prevents a failed teardown when HMEM must reclaim the range.

Introduce cxl_register_dax() to walk overlapping CXL regions and register
DAX from CXL only when cxl_regions_fully_map() confirms full coverage of
the span. If CXL does not own the span, skip cxl_dax setup and allow HMEM
to register DAX and online memory.

With probe-time DAX creation already suppressed in the previous patch,
this change ensures that only a single owner (either CXL or HMEM) performs
DAX/KMEM setup.

Signed-off-by: Smita Koralahalli <Smita.KoralahalliChannabasappa@....com>
---
 drivers/cxl/core/region.c | 42 +++++++++++++++++++++++++++++++++++++++
 drivers/cxl/cxl.h         |  5 +++++
 drivers/dax/hmem/hmem.c   |  5 +++--
 3 files changed, 50 insertions(+), 2 deletions(-)

diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index c17cd8706b9d..38e7ec6a087b 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -3784,6 +3784,48 @@ struct cxl_range_ctx {
 	bool found;
 };
 
+static void cxl_region_enable_dax(struct cxl_region *cxlr)
+{
+	struct cxl_region_params *p = &cxlr->params;
+	int rc;
+
+	if (walk_iomem_res_desc(IORES_DESC_NONE,
+				IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
+				p->res->start, p->res->end, cxlr,
+				is_system_ram) > 0)
+		return;
+
+	rc = devm_cxl_add_dax_region(cxlr);
+	if (rc)
+		dev_warn(&cxlr->dev, "failed to add DAX for %s: %d\n",
+			 dev_name(&cxlr->dev), rc);
+}
+
+static int cxl_register_dax_cb(struct device *dev, void *data)
+{
+	struct cxl_range_ctx *ctx = data;
+	struct cxl_region *cxlr;
+
+	cxlr = cxlr_overlapping_range(dev, ctx->start, ctx->end);
+	if (!cxlr)
+		return 0;
+
+	if (cxlr->mode != CXL_PARTMODE_RAM)
+		return 0;
+
+	cxl_region_enable_dax(cxlr);
+
+	return 0;
+}
+
+void cxl_register_dax(resource_size_t start, resource_size_t end)
+{
+	struct cxl_range_ctx ctx = { .start = start, .end = end };
+
+	bus_for_each_dev(&cxl_bus_type, NULL, &ctx, cxl_register_dax_cb);
+}
+EXPORT_SYMBOL_GPL(cxl_register_dax);
+
 static int cxl_region_map_cb(struct device *dev, void *data)
 {
 	struct cxl_range_ctx *ctx = data;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 324220596890..414ddf6c35d7 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -879,6 +879,7 @@ int cxl_add_to_region(struct cxl_endpoint_decoder *cxled);
 struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
 u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa);
 bool cxl_regions_fully_map(resource_size_t start, resource_size_t end);
+void cxl_register_dax(resource_size_t start, resource_size_t end);
 #else
 static inline bool is_cxl_pmem_region(struct device *dev)
 {
@@ -906,6 +907,10 @@ static inline bool cxl_regions_fully_map(resource_size_t start,
 {
 	return false;
 }
+static inline void cxl_register_dax(resource_size_t start,
+				    resource_size_t end)
+{
+}
 #endif
 
 void cxl_endpoint_parse_cdat(struct cxl_port *port);
diff --git a/drivers/dax/hmem/hmem.c b/drivers/dax/hmem/hmem.c
index db4c46337ac3..b9312e0f2e62 100644
--- a/drivers/dax/hmem/hmem.c
+++ b/drivers/dax/hmem/hmem.c
@@ -155,9 +155,10 @@ static int handle_deferred_cxl(struct device *host, int target_nid,
 	if (region_intersects(res->start, resource_size(res), IORESOURCE_MEM,
 			      IORES_DESC_CXL) != REGION_DISJOINT) {
 
-		if (cxl_regions_fully_map(res->start, res->end))
+		if (cxl_regions_fully_map(res->start, res->end)) {
 			dax_cxl_mode = DAX_CXL_MODE_DROP;
-		else
+			cxl_register_dax(res->start, res->end);
+		} else
 			dax_cxl_mode = DAX_CXL_MODE_REGISTER;
 
 		hmem_register_device(host, target_nid, res);
-- 
2.17.1

