lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251120031925.87762-9-Smita.KoralahalliChannabasappa@amd.com>
Date: Thu, 20 Nov 2025 03:19:24 +0000
From: Smita Koralahalli <Smita.KoralahalliChannabasappa@....com>
To: <linux-cxl@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
	<nvdimm@...ts.linux.dev>, <linux-fsdevel@...r.kernel.org>,
	<linux-pm@...r.kernel.org>
CC: Alison Schofield <alison.schofield@...el.com>, Vishal Verma
	<vishal.l.verma@...el.com>, Ira Weiny <ira.weiny@...el.com>, Dan Williams
	<dan.j.williams@...el.com>, Jonathan Cameron <jonathan.cameron@...wei.com>,
	Yazen Ghannam <yazen.ghannam@....com>, Dave Jiang <dave.jiang@...el.com>,
	Davidlohr Bueso <dave@...olabs.net>, Matthew Wilcox <willy@...radead.org>,
	Jan Kara <jack@...e.cz>, "Rafael J . Wysocki" <rafael@...nel.org>, Len Brown
	<len.brown@...el.com>, Pavel Machek <pavel@...nel.org>, Li Ming
	<ming.li@...omail.com>, Jeff Johnson <jeff.johnson@....qualcomm.com>, "Ying
 Huang" <huang.ying.caritas@...il.com>, Yao Xingtao <yaoxt.fnst@...itsu.com>,
	Peter Zijlstra <peterz@...radead.org>, Greg KH <gregkh@...uxfoundation.org>,
	Nathan Fontenot <nathan.fontenot@....com>, Terry Bowman
	<terry.bowman@....com>, Robert Richter <rrichter@....com>, Benjamin Cheatham
	<benjamin.cheatham@....com>, Zhijian Li <lizhijian@...itsu.com>, "Borislav
 Petkov" <bp@...en8.de>, Ard Biesheuvel <ardb@...nel.org>
Subject: [PATCH v4 8/9] cxl/region, dax/hmem: Tear down CXL regions when HMEM reclaims Soft Reserved

If CXL regions do not fully cover a Soft Reserved span, HMEM takes
ownership. Tear down overlapping CXL regions before allowing HMEM to
register and online the memory.

Add cxl_region_teardown() to walk CXL regions overlapping a span and
unregister them via devm_release_action() and unregister_region().

Force the region state back to CXL_CONFIG_ACTIVE before unregistering to
prevent the teardown path from resetting decoders that HMEM still relies
on to create its DAX device and online the memory.

Co-developed-by: Alison Schofield <alison.schofield@...el.com>
Signed-off-by: Alison Schofield <alison.schofield@...el.com>
Signed-off-by: Smita Koralahalli <Smita.KoralahalliChannabasappa@....com>
---
 drivers/cxl/core/region.c | 38 ++++++++++++++++++++++++++++++++++++++
 drivers/cxl/cxl.h         |  5 +++++
 drivers/dax/hmem/hmem.c   |  4 +++-
 3 files changed, 46 insertions(+), 1 deletion(-)

diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 38e7ec6a087b..266b24028df0 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -3784,6 +3784,44 @@ struct cxl_range_ctx {
 	bool found;
 };
 
+static int cxl_region_teardown_cb(struct device *dev, void *data)
+{
+	struct cxl_range_ctx *ctx = data;
+	struct cxl_root_decoder *cxlrd;
+	struct cxl_region_params *p;
+	struct cxl_region *cxlr;
+	struct cxl_port *port;
+
+	cxlr = cxlr_overlapping_range(dev, ctx->start, ctx->end);
+	if (!cxlr)
+		return 0;
+
+	cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+	port = cxlrd_to_port(cxlrd);
+	p = &cxlr->params;
+
+	/*
+	 * Force the region state back to CXL_CONFIG_ACTIVE so that
+	 * unregister_region() skips the full decoder reset path, which
+	 * would invalidate decoder programming HMEM relies on to
+	 * create its DAX device and online the underlying memory.
+	 */
+	scoped_guard(rwsem_write, &cxl_rwsem.region)
+		p->state = min(p->state, CXL_CONFIG_ACTIVE);
+
+	devm_release_action(port->uport_dev, unregister_region, cxlr);
+
+	return 0;
+}
+
+void cxl_region_teardown(resource_size_t start, resource_size_t end)
+{
+	struct cxl_range_ctx ctx = { .start = start, .end = end };
+
+	bus_for_each_dev(&cxl_bus_type, NULL, &ctx, cxl_region_teardown_cb);
+}
+EXPORT_SYMBOL_GPL(cxl_region_teardown);
+
 static void cxl_region_enable_dax(struct cxl_region *cxlr)
 {
 	struct cxl_region_params *p = &cxlr->params;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 414ddf6c35d7..a215a88ef59c 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -880,6 +880,7 @@ struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
 u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa);
 bool cxl_regions_fully_map(resource_size_t start, resource_size_t end);
 void cxl_register_dax(resource_size_t start, resource_size_t end);
+void cxl_region_teardown(resource_size_t start, resource_size_t end);
 #else
 static inline bool is_cxl_pmem_region(struct device *dev)
 {
@@ -911,6 +912,10 @@ static inline void cxl_register_dax(resource_size_t start,
 				    resource_size_t end)
 {
 }
+static inline void cxl_region_teardown(resource_size_t start,
+				       resource_size_t end)
+{
+}
 #endif
 
 void cxl_endpoint_parse_cdat(struct cxl_port *port);
diff --git a/drivers/dax/hmem/hmem.c b/drivers/dax/hmem/hmem.c
index b9312e0f2e62..7d874ee169ac 100644
--- a/drivers/dax/hmem/hmem.c
+++ b/drivers/dax/hmem/hmem.c
@@ -158,8 +158,10 @@ static int handle_deferred_cxl(struct device *host, int target_nid,
 		if (cxl_regions_fully_map(res->start, res->end)) {
 			dax_cxl_mode = DAX_CXL_MODE_DROP;
 			cxl_register_dax(res->start, res->end);
-		} else
+		} else {
 			dax_cxl_mode = DAX_CXL_MODE_REGISTER;
+			cxl_region_teardown(res->start, res->end);
+		}
 
 		hmem_register_device(host, target_nid, res);
 	}
-- 
2.17.1


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ