Message-ID: <20250507072145.3614298-8-dan.j.williams@intel.com>
Date: Wed, 7 May 2025 00:21:45 -0700
From: Dan Williams <dan.j.williams@...el.com>
To: <linux-cxl@...r.kernel.org>
CC: <linux-kernel@...r.kernel.org>, David Lechner <dlechner@...libre.com>,
	Peter Zijlstra <peterz@...radead.org>,
	Linus Torvalds <torvalds@...ux-foundation.org>,
	Ingo Molnar <mingo@...nel.org>,
	"Fabio M. De Francesco" <fabio.maria.de.francesco@...ux.intel.com>,
	Davidlohr Bueso <dave@...olabs.net>,
	Jonathan Cameron <jonathan.cameron@...wei.com>,
	Dave Jiang <dave.jiang@...el.com>,
	Alison Schofield <alison.schofield@...el.com>,
	Vishal Verma <vishal.l.verma@...el.com>, Ira Weiny <ira.weiny@...el.com>
Subject: [PATCH 7/7] cleanup: Create an rwsem conditional acquisition class

Introduce 'struct rw_semaphore_acquire' with the following helpers:

[scoped_]guard(rwsem_read_acquire)(...)
[scoped_]guard(rwsem_write_acquire)(...)
CLASS(rwsem_read_intr_acquire, ...)
CLASS(rwsem_write_kill_acquire, ...)
CLASS(rwsem_read_try_acquire, ...)
CLASS(rwsem_write_try_acquire, ...)

...and convert the CXL 'struct rw_semaphore' instances to 'struct
rw_semaphore_acquire'.
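
As a sketch of the conversion pattern (the 'foo' names below are
illustrative only, not from this patch), a sysfs show handler that
previously open-coded down_read_interruptible()/up_read() becomes:

	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		struct foo *foo = to_foo(dev);

		CLASS(rwsem_read_intr_acquire, rwsem)(&foo_rwsem);
		if (IS_ERR(rwsem))
			return PTR_ERR(rwsem);
		/* read lock held; dropped when 'rwsem' goes out of scope */
		return sysfs_emit(buf, "%d\n", foo->state);
	}

...where 'foo_rwsem' is a 'struct rw_semaphore_acquire', so there is no
unlock call and no 'out:' label to maintain.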

Recall that the "_acquire" flavor of locking primitives does not support
explicit 'lock' and 'unlock' operations, only guard() and CLASS(). This
mandates some pre-work before the mechanism can be adopted in a subsystem.
Specifically, some explicit unlock patterns in the CXL subsystem
(mid-function unlock and "reverse" locking patterns) were refactored into
sequences that are amenable to scope-based unlock.
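
For instance, the devm_cxl_dpa_reserve() conversion below turns:

	down_write(&cxl_dpa_rwsem);
	rc = __cxl_dpa_reserve(cxled, base, len, skipped);
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

...into a scope that releases the lock on exit:

	scoped_guard(rwsem_write_acquire, &cxl_dpa_rwsem)
		rc = __cxl_dpa_reserve(cxled, base, len, skipped);

	if (rc)
		return rc;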

The result is smaller code that is easier to read (once CLASS() semantics
are understood) and avoids the re-indentation implied by
scoped_cond_guard().

One nit, though, is that 'struct rw_semaphore_acquire' is not compatible
with the lockdep APIs. So, for now, open-code the type conversion back to
the raw 'struct rw_semaphore'.
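
For example:

	lockdep_assert_held(&cxl_dpa_rwsem.rw_semaphore);

...rather than passing the wrapper object directly.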

Cc: David Lechner <dlechner@...libre.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: "Fabio M. De Francesco" <fabio.maria.de.francesco@...ux.intel.com>
Cc: Davidlohr Bueso <dave@...olabs.net>
Cc: Jonathan Cameron <jonathan.cameron@...wei.com>
Cc: Dave Jiang <dave.jiang@...el.com>
Cc: Alison Schofield <alison.schofield@...el.com>
Cc: Vishal Verma <vishal.l.verma@...el.com>
Cc: Ira Weiny <ira.weiny@...el.com>
Signed-off-by: Dan Williams <dan.j.williams@...el.com>
---
 drivers/cxl/core/cdat.c   |   6 +-
 drivers/cxl/core/core.h   |  34 ++---
 drivers/cxl/core/hdm.c    |  38 +++--
 drivers/cxl/core/mbox.c   |   6 +-
 drivers/cxl/core/memdev.c |  62 ++++-----
 drivers/cxl/core/port.c   |  12 +-
 drivers/cxl/core/region.c | 286 +++++++++++++++-----------------------
 include/linux/rwsem.h     |  37 +++++
 8 files changed, 219 insertions(+), 262 deletions(-)

diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index edb4f41eeacc..dadfcf6fb84c 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -336,7 +336,7 @@ static int match_cxlrd_hb(struct device *dev, void *data)
 	cxlrd = to_cxl_root_decoder(dev);
 	cxlsd = &cxlrd->cxlsd;
 
-	guard(rwsem_read)(&cxl_region_rwsem);
+	guard(rwsem_read_acquire)(&cxl_region_rwsem);
 	for (int i = 0; i < cxlsd->nr_targets; i++) {
 		if (host_bridge == cxlsd->target[i]->dport_dev)
 			return 1;
@@ -987,7 +987,7 @@ void cxl_region_shared_upstream_bandwidth_update(struct cxl_region *cxlr)
 	bool is_root;
 	int rc;
 
-	lockdep_assert_held(&cxl_dpa_rwsem);
+	lockdep_assert_held(&cxl_dpa_rwsem.rw_semaphore);
 
 	struct xarray *usp_xa __free(free_perf_xa) =
 		kzalloc(sizeof(*usp_xa), GFP_KERNEL);
@@ -1057,7 +1057,7 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
 {
 	struct cxl_dpa_perf *perf;
 
-	lockdep_assert_held(&cxl_dpa_rwsem);
+	lockdep_assert_held(&cxl_dpa_rwsem.rw_semaphore);
 
 	perf = cxled_get_dpa_perf(cxled);
 	if (IS_ERR(perf))
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 44b09552f44e..23afbbd650b7 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -107,8 +107,8 @@ u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb);
 #define PCI_RCRB_CAP_HDR_NEXT_MASK	GENMASK(15, 8)
 #define PCI_CAP_EXP_SIZEOF		0x3c
 
-extern struct rw_semaphore cxl_dpa_rwsem;
-extern struct rw_semaphore cxl_region_rwsem;
+extern struct rw_semaphore_acquire cxl_dpa_rwsem;
+extern struct rw_semaphore_acquire cxl_region_rwsem;
 
 DEFINE_CLASS(
 	cxl_decoder_detach, struct cxl_region *,
@@ -117,22 +117,24 @@ DEFINE_CLASS(
 		put_device(&_T->dev);
 	},
 	({
-		int rc = 0;
-
 		/* when the decoder is being destroyed lock unconditionally */
-		if (mode == DETACH_INVALIDATE)
-			down_write(&cxl_region_rwsem);
-		else
-			rc = down_write_killable(&cxl_region_rwsem);
-
-		if (rc)
-			cxlr = ERR_PTR(rc);
-		else {
-			cxlr = cxl_decoder_detach(cxlr, cxled, pos, mode);
-			get_device(&cxlr->dev);
+		if (mode == DETACH_INVALIDATE) {
+			scoped_guard(rwsem_write_acquire, &cxl_region_rwsem) {
+				cxlr = cxl_decoder_detach(cxlr, cxled, pos,
+							  mode);
+				if (!IS_ERR_OR_NULL(cxlr))
+					get_device(&cxlr->dev);
+			}
+		} else {
+			CLASS(rwsem_write_kill_acquire, rwsem)(&cxl_region_rwsem);
+			if (IS_ERR(rwsem))
+				cxlr = ERR_CAST(rwsem);
+			else
+				cxlr = cxl_decoder_detach(cxlr, cxled, pos,
+							  mode);
+			if (!IS_ERR_OR_NULL(cxlr))
+				get_device(&cxlr->dev);
 		}
-		up_write(&cxl_region_rwsem);
-
 		cxlr;
 	}),
 	struct cxl_region *cxlr, struct cxl_endpoint_decoder *cxled, int pos,
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 1c195f495a59..1624f066fde5 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -16,7 +16,7 @@
  * for enumerating these registers and capabilities.
  */
 
-DECLARE_RWSEM(cxl_dpa_rwsem);
+DECLARE_RWSEM_ACQUIRE(cxl_dpa_rwsem);
 
 static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 			   int *target_map)
@@ -213,7 +213,7 @@ void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
 {
 	struct resource *p1, *p2;
 
-	guard(rwsem_read)(&cxl_dpa_rwsem);
+	guard(rwsem_read_acquire)(&cxl_dpa_rwsem);
 	for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
 		__cxl_dpa_debug(file, p1, 0);
 		for (p2 = p1->child; p2; p2 = p2->sibling)
@@ -265,7 +265,7 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
 	struct resource *res = cxled->dpa_res;
 	resource_size_t skip_start;
 
-	lockdep_assert_held_write(&cxl_dpa_rwsem);
+	lockdep_assert_held_write(&cxl_dpa_rwsem.rw_semaphore);
 
 	/* save @skip_start, before @res is released */
 	skip_start = res->start - cxled->skip;
@@ -280,7 +280,7 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
 
 static void cxl_dpa_release(void *cxled)
 {
-	guard(rwsem_write)(&cxl_dpa_rwsem);
+	guard(rwsem_write_acquire)(&cxl_dpa_rwsem);
 	__cxl_dpa_release(cxled);
 }
 
@@ -292,7 +292,7 @@ static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
 {
 	struct cxl_port *port = cxled_to_port(cxled);
 
-	lockdep_assert_held_write(&cxl_dpa_rwsem);
+	lockdep_assert_held_write(&cxl_dpa_rwsem.rw_semaphore);
 	devm_remove_action(&port->dev, cxl_dpa_release, cxled);
 	__cxl_dpa_release(cxled);
 }
@@ -360,7 +360,7 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
 	struct resource *res;
 	int rc;
 
-	lockdep_assert_held_write(&cxl_dpa_rwsem);
+	lockdep_assert_held_write(&cxl_dpa_rwsem.rw_semaphore);
 
 	if (!len) {
 		dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
@@ -469,7 +469,7 @@ int cxl_dpa_setup(struct cxl_dev_state *cxlds, const struct cxl_dpa_info *info)
 {
 	struct device *dev = cxlds->dev;
 
-	guard(rwsem_write)(&cxl_dpa_rwsem);
+	guard(rwsem_write_acquire)(&cxl_dpa_rwsem);
 
 	if (cxlds->nr_partitions)
 		return -EBUSY;
@@ -515,9 +515,8 @@ int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
 	struct cxl_port *port = cxled_to_port(cxled);
 	int rc;
 
-	down_write(&cxl_dpa_rwsem);
-	rc = __cxl_dpa_reserve(cxled, base, len, skipped);
-	up_write(&cxl_dpa_rwsem);
+	scoped_guard(rwsem_write_acquire, &cxl_dpa_rwsem)
+		rc = __cxl_dpa_reserve(cxled, base, len, skipped);
 
 	if (rc)
 		return rc;
@@ -528,7 +527,7 @@ EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, "CXL");
 
 resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
 {
-	guard(rwsem_read)(&cxl_dpa_rwsem);
+	guard(rwsem_read_acquire)(&cxl_dpa_rwsem);
 	if (cxled->dpa_res)
 		return resource_size(cxled->dpa_res);
 
@@ -539,7 +538,7 @@ resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
 {
 	resource_size_t base = -1;
 
-	lockdep_assert_held(&cxl_dpa_rwsem);
+	lockdep_assert_held(&cxl_dpa_rwsem.rw_semaphore);
 	if (cxled->dpa_res)
 		base = cxled->dpa_res->start;
 
@@ -551,7 +550,7 @@ int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
 	struct cxl_port *port = cxled_to_port(cxled);
 	struct device *dev = &cxled->cxld.dev;
 
-	guard(rwsem_write)(&cxl_dpa_rwsem);
+	guard(rwsem_write_acquire)(&cxl_dpa_rwsem);
 	if (!cxled->dpa_res)
 		return 0;
 	if (cxled->cxld.region) {
@@ -581,7 +580,7 @@ int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
 	struct device *dev = &cxled->cxld.dev;
 	int part;
 
-	guard(rwsem_write)(&cxl_dpa_rwsem);
+	guard(rwsem_write_acquire)(&cxl_dpa_rwsem);
 	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE)
 		return -EBUSY;
 
@@ -613,7 +612,7 @@ static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long lon
 	struct resource *p, *last;
 	int part;
 
-	guard(rwsem_write)(&cxl_dpa_rwsem);
+	guard(rwsem_write_acquire)(&cxl_dpa_rwsem);
 	if (cxled->cxld.region) {
 		dev_dbg(dev, "decoder attached to %s\n",
 			dev_name(&cxled->cxld.region->dev));
@@ -841,9 +840,8 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
 		}
 	}
 
-	down_read(&cxl_dpa_rwsem);
-	setup_hw_decoder(cxld, hdm);
-	up_read(&cxl_dpa_rwsem);
+	scoped_guard(rwsem_read_acquire, &cxl_dpa_rwsem)
+		setup_hw_decoder(cxld, hdm);
 
 	port->commit_end++;
 	rc = cxld_await_commit(hdm, cxld->id);
@@ -881,7 +879,7 @@ void cxl_port_commit_reap(struct cxl_decoder *cxld)
 {
 	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
 
-	lockdep_assert_held_write(&cxl_region_rwsem);
+	lockdep_assert_held_write(&cxl_region_rwsem.rw_semaphore);
 
 	/*
 	 * Once the highest committed decoder is disabled, free any other
@@ -1029,7 +1027,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 		else
 			cxld->target_type = CXL_DECODER_DEVMEM;
 
-		guard(rwsem_write)(&cxl_region_rwsem);
+		guard(rwsem_write_acquire)(&cxl_region_rwsem);
 		if (cxld->id != cxl_num_decoders_committed(port)) {
 			dev_warn(&port->dev,
 				 "decoder%d.%d: Committed out of order\n",
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index cec9dfb22567..9f17e9072e60 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -909,8 +909,8 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
 		 * translations. Take topology mutation locks and lookup
 		 * { HPA, REGION } from { DPA, MEMDEV } in the event record.
 		 */
-		guard(rwsem_read)(&cxl_region_rwsem);
-		guard(rwsem_read)(&cxl_dpa_rwsem);
+		guard(rwsem_read_acquire)(&cxl_region_rwsem);
+		guard(rwsem_read_acquire)(&cxl_dpa_rwsem);
 
 		dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK;
 		cxlr = cxl_dpa_to_region(cxlmd, dpa);
@@ -1258,7 +1258,7 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
 	/* synchronize with cxl_mem_probe() and decoder write operations */
 	guard(device)(&cxlmd->dev);
 	endpoint = cxlmd->endpoint;
-	guard(rwsem_read)(&cxl_region_rwsem);
+	guard(rwsem_read_acquire)(&cxl_region_rwsem);
 	/*
 	 * Require an endpoint to be safe otherwise the driver can not
 	 * be sure that the device is unmapped.
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index a16a5886d40a..8183f8099f89 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -231,15 +231,13 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
 	if (!port || !is_cxl_endpoint(port))
 		return -EINVAL;
 
-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
-		return rc;
+	CLASS(rwsem_read_intr_acquire, region_rwsem)(&cxl_region_rwsem);
+	if (IS_ERR(region_rwsem))
+		return PTR_ERR(region_rwsem);
 
-	rc = down_read_interruptible(&cxl_dpa_rwsem);
-	if (rc) {
-		up_read(&cxl_region_rwsem);
-		return rc;
-	}
+	CLASS(rwsem_read_intr_acquire, dpa_rwsem)(&cxl_dpa_rwsem);
+	if (IS_ERR(dpa_rwsem))
+		return PTR_ERR(dpa_rwsem);
 
 	if (cxl_num_decoders_committed(port) == 0) {
 		/* No regions mapped to this memdev */
@@ -248,8 +246,6 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
 		/* Regions mapped, collect poison by endpoint */
 		rc =  cxl_get_poison_by_endpoint(port);
 	}
-	up_read(&cxl_dpa_rwsem);
-	up_read(&cxl_region_rwsem);
 
 	return rc;
 }
@@ -291,19 +287,17 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
 		return 0;
 
-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
-		return rc;
+	CLASS(rwsem_read_intr_acquire, region_rwsem)(&cxl_region_rwsem);
+	if (IS_ERR(region_rwsem))
+		return PTR_ERR(region_rwsem);
 
-	rc = down_read_interruptible(&cxl_dpa_rwsem);
-	if (rc) {
-		up_read(&cxl_region_rwsem);
-		return rc;
-	}
+	CLASS(rwsem_read_intr_acquire, dpa_rwsem)(&cxl_dpa_rwsem);
+	if (IS_ERR(dpa_rwsem))
+		return PTR_ERR(dpa_rwsem);
 
 	rc = cxl_validate_poison_dpa(cxlmd, dpa);
 	if (rc)
-		goto out;
+		return rc;
 
 	inject.address = cpu_to_le64(dpa);
 	mbox_cmd = (struct cxl_mbox_cmd) {
@@ -313,7 +307,7 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
 	};
 	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 	if (rc)
-		goto out;
+		return rc;
 
 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
 	if (cxlr)
@@ -326,11 +320,8 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
 		.length = cpu_to_le32(1),
 	};
 	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
-out:
-	up_read(&cxl_dpa_rwsem);
-	up_read(&cxl_region_rwsem);
 
-	return rc;
+	return 0;
 }
 EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, "CXL");
 
@@ -346,19 +337,17 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
 		return 0;
 
-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
-		return rc;
+	CLASS(rwsem_read_intr_acquire, region_rwsem)(&cxl_region_rwsem);
+	if (IS_ERR(region_rwsem))
+		return PTR_ERR(region_rwsem);
 
-	rc = down_read_interruptible(&cxl_dpa_rwsem);
-	if (rc) {
-		up_read(&cxl_region_rwsem);
-		return rc;
-	}
+	CLASS(rwsem_read_intr_acquire, dpa_rwsem)(&cxl_dpa_rwsem);
+	if (IS_ERR(dpa_rwsem))
+		return PTR_ERR(dpa_rwsem);
 
 	rc = cxl_validate_poison_dpa(cxlmd, dpa);
 	if (rc)
-		goto out;
+		return rc;
 
 	/*
 	 * In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command
@@ -377,7 +366,7 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
 
 	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 	if (rc)
-		goto out;
+		return rc;
 
 	cxlr = cxl_dpa_to_region(cxlmd, dpa);
 	if (cxlr)
@@ -390,11 +379,8 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
 		.length = cpu_to_le32(1),
 	};
 	trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
-out:
-	up_read(&cxl_dpa_rwsem);
-	up_read(&cxl_region_rwsem);
 
-	return rc;
+	return 0;
 }
 EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, "CXL");
 
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 20b65f13bded..a157aef2cd06 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -34,14 +34,14 @@
  * All changes to the interleave configuration occur with this lock held
  * for write.
  */
-DECLARE_RWSEM(cxl_region_rwsem);
+DECLARE_RWSEM_ACQUIRE(cxl_region_rwsem);
 
 static DEFINE_IDA(cxl_port_ida);
 static DEFINE_XARRAY(cxl_root_buses);
 
 int cxl_num_decoders_committed(struct cxl_port *port)
 {
-	lockdep_assert_held(&cxl_region_rwsem);
+	lockdep_assert_held(&cxl_region_rwsem.rw_semaphore);
 
 	return port->commit_end + 1;
 }
@@ -176,7 +176,7 @@ static ssize_t target_list_show(struct device *dev,
 	ssize_t offset;
 	int rc;
 
-	guard(rwsem_read)(&cxl_region_rwsem);
+	guard(rwsem_read_acquire)(&cxl_region_rwsem);
 	rc = emit_target_list(cxlsd, buf);
 	if (rc < 0)
 		return rc;
@@ -235,7 +235,7 @@ static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *at
 {
 	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
 
-	guard(rwsem_read)(&cxl_dpa_rwsem);
+	guard(rwsem_read_acquire)(&cxl_dpa_rwsem);
 	return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled));
 }
 static DEVICE_ATTR_RO(dpa_resource);
@@ -560,7 +560,7 @@ static ssize_t decoders_committed_show(struct device *dev,
 {
 	struct cxl_port *port = to_cxl_port(dev);
 
-	guard(rwsem_read)(&cxl_region_rwsem);
+	guard(rwsem_read_acquire)(&cxl_region_rwsem);
 	return sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
 }
 
@@ -1729,7 +1729,7 @@ static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
 	if (xa_empty(&port->dports))
 		return -EINVAL;
 
-	guard(rwsem_write)(&cxl_region_rwsem);
+	guard(rwsem_write_acquire)(&cxl_region_rwsem);
 	for (i = 0; i < cxlsd->cxld.interleave_ways; i++) {
 		struct cxl_dport *dport = find_dport(port, target_map[i]);
 
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 17e69f6cc772..65313a3548e4 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -139,18 +139,13 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
 {
 	struct cxl_region *cxlr = to_cxl_region(dev);
 	struct cxl_region_params *p = &cxlr->params;
-	ssize_t rc;
 
-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
-		return rc;
+	CLASS(rwsem_read_intr_acquire, region_rwsem)(&cxl_region_rwsem);
+	if (IS_ERR(region_rwsem))
+		return PTR_ERR(region_rwsem);
 	if (cxlr->mode != CXL_PARTMODE_PMEM)
-		rc = sysfs_emit(buf, "\n");
-	else
-		rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
-	up_read(&cxl_region_rwsem);
-
-	return rc;
+		return sysfs_emit(buf, "\n");
+	return sysfs_emit(buf, "%pUb\n", &p->uuid);
 }
 
 static int is_dup(struct device *match, void *data)
@@ -162,7 +157,7 @@ static int is_dup(struct device *match, void *data)
 	if (!is_cxl_region(match))
 		return 0;
 
-	lockdep_assert_held(&cxl_region_rwsem);
+	lockdep_assert_held(&cxl_region_rwsem.rw_semaphore);
 	cxlr = to_cxl_region(match);
 	p = &cxlr->params;
 
@@ -192,27 +187,22 @@ static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
 	if (uuid_is_null(&temp))
 		return -EINVAL;
 
-	rc = down_write_killable(&cxl_region_rwsem);
-	if (rc)
-		return rc;
+	CLASS(rwsem_write_kill_acquire, region_rwsem)(&cxl_region_rwsem);
+	if (IS_ERR(region_rwsem))
+		return PTR_ERR(region_rwsem);
 
 	if (uuid_equal(&p->uuid, &temp))
-		goto out;
+		return len;
 
-	rc = -EBUSY;
 	if (p->state >= CXL_CONFIG_ACTIVE)
-		goto out;
+		return -EBUSY;
 
 	rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
 	if (rc < 0)
-		goto out;
+		return rc;
 
 	uuid_copy(&p->uuid, &temp);
-out:
-	up_write(&cxl_region_rwsem);
 
-	if (rc)
-		return rc;
 	return len;
 }
 static DEVICE_ATTR_RW(uuid);
@@ -353,22 +343,18 @@ static int cxl_region_decode_commit(struct cxl_region *cxlr)
 static int queue_reset(struct cxl_region *cxlr)
 {
 	struct cxl_region_params *p = &cxlr->params;
-	int rc;
 
-	rc = down_write_killable(&cxl_region_rwsem);
-	if (rc)
-		return rc;
+	CLASS(rwsem_write_kill_acquire, rwsem)(&cxl_region_rwsem);
+	if (IS_ERR(rwsem))
+		return PTR_ERR(rwsem);
 
 	/* Already in the requested state? */
 	if (p->state < CXL_CONFIG_COMMIT)
-		goto out;
+		return 0;
 
 	p->state = CXL_CONFIG_RESET_PENDING;
 
-out:
-	up_write(&cxl_region_rwsem);
-
-	return rc;
+	return 0;
 }
 
 static int __commit(struct cxl_region *cxlr)
@@ -376,19 +362,17 @@ static int __commit(struct cxl_region *cxlr)
 	struct cxl_region_params *p = &cxlr->params;
 	int rc;
 
-	rc = down_write_killable(&cxl_region_rwsem);
-	if (rc)
-		return rc;
+	CLASS(rwsem_write_kill_acquire, rwsem)(&cxl_region_rwsem);
+	if (IS_ERR(rwsem))
+		return PTR_ERR(rwsem);
 
 	/* Already in the requested state? */
 	if (p->state >= CXL_CONFIG_COMMIT)
-		goto out;
+		return 0;
 
 	/* Not ready to commit? */
-	if (p->state < CXL_CONFIG_ACTIVE) {
-		rc = -ENXIO;
-		goto out;
-	}
+	if (p->state < CXL_CONFIG_ACTIVE)
+		return -ENXIO;
 
 	/*
 	 * Invalidate caches before region setup to drop any speculative
@@ -396,16 +380,15 @@ static int __commit(struct cxl_region *cxlr)
 	 */
 	rc = cxl_region_invalidate_memregion(cxlr);
 	if (rc)
-		goto out;
+		return rc;
 
 	rc = cxl_region_decode_commit(cxlr);
-	if (rc == 0)
-		p->state = CXL_CONFIG_COMMIT;
+	if (rc)
+		return rc;
 
-out:
-	up_write(&cxl_region_rwsem);
+	p->state = CXL_CONFIG_COMMIT;
 
-	return rc;
+	return 0;
 }
 
 static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
@@ -441,7 +424,7 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
 	 * With the reset pending take cxl_region_rwsem unconditionally
 	 * to ensure the reset gets handled before returning.
 	 */
-	guard(rwsem_write)(&cxl_region_rwsem);
+	guard(rwsem_write_acquire)(&cxl_region_rwsem);
 
 	/*
 	 * Revalidate that the reset is still pending in case another
@@ -460,15 +443,11 @@ static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
 {
 	struct cxl_region *cxlr = to_cxl_region(dev);
 	struct cxl_region_params *p = &cxlr->params;
-	ssize_t rc;
 
-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
-		return rc;
-	rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
-	up_read(&cxl_region_rwsem);
-
-	return rc;
+	CLASS(rwsem_read_intr_acquire, rwsem)(&cxl_region_rwsem);
+	if (IS_ERR(rwsem))
+		return PTR_ERR(rwsem);
+	return sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
 }
 static DEVICE_ATTR_RW(commit);
 
@@ -492,15 +471,11 @@ static ssize_t interleave_ways_show(struct device *dev,
 {
 	struct cxl_region *cxlr = to_cxl_region(dev);
 	struct cxl_region_params *p = &cxlr->params;
-	ssize_t rc;
-
-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
-		return rc;
-	rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
-	up_read(&cxl_region_rwsem);
 
-	return rc;
+	CLASS(rwsem_read_intr_acquire, rwsem)(&cxl_region_rwsem);
+	if (IS_ERR(rwsem))
+		return PTR_ERR(rwsem);
+	return sysfs_emit(buf, "%d\n", p->interleave_ways);
 }
 
 static const struct attribute_group *get_cxl_region_target_group(void);
@@ -535,23 +510,21 @@ static ssize_t interleave_ways_store(struct device *dev,
 		return -EINVAL;
 	}
 
-	rc = down_write_killable(&cxl_region_rwsem);
-	if (rc)
-		return rc;
-	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
-		rc = -EBUSY;
-		goto out;
-	}
+	CLASS(rwsem_write_kill_acquire, rwsem)(&cxl_region_rwsem);
+	if (IS_ERR(rwsem))
+		return PTR_ERR(rwsem);
+
+	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
+		return -EBUSY;
 
 	save = p->interleave_ways;
 	p->interleave_ways = val;
 	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
-	if (rc)
+	if (rc) {
 		p->interleave_ways = save;
-out:
-	up_write(&cxl_region_rwsem);
-	if (rc)
 		return rc;
+	}
+
 	return len;
 }
 static DEVICE_ATTR_RW(interleave_ways);
@@ -562,15 +535,11 @@ static ssize_t interleave_granularity_show(struct device *dev,
 {
 	struct cxl_region *cxlr = to_cxl_region(dev);
 	struct cxl_region_params *p = &cxlr->params;
-	ssize_t rc;
 
-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
-		return rc;
-	rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
-	up_read(&cxl_region_rwsem);
-
-	return rc;
+	CLASS(rwsem_read_intr_acquire, rwsem)(&cxl_region_rwsem);
+	if (IS_ERR(rwsem))
+		return PTR_ERR(rwsem);
+	return sysfs_emit(buf, "%d\n", p->interleave_granularity);
 }
 
 static ssize_t interleave_granularity_store(struct device *dev,
@@ -603,19 +572,15 @@ static ssize_t interleave_granularity_store(struct device *dev,
 	if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
 		return -EINVAL;
 
-	rc = down_write_killable(&cxl_region_rwsem);
-	if (rc)
-		return rc;
-	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
-		rc = -EBUSY;
-		goto out;
-	}
+	CLASS(rwsem_write_kill_acquire, rwsem)(&cxl_region_rwsem);
+	if (IS_ERR(rwsem))
+		return PTR_ERR(rwsem);
+
+	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
+		return -EBUSY;
 
 	p->interleave_granularity = val;
-out:
-	up_write(&cxl_region_rwsem);
-	if (rc)
-		return rc;
+
 	return len;
 }
 static DEVICE_ATTR_RW(interleave_granularity);
@@ -626,17 +591,14 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
 	struct cxl_region *cxlr = to_cxl_region(dev);
 	struct cxl_region_params *p = &cxlr->params;
 	u64 resource = -1ULL;
-	ssize_t rc;
 
-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
-		return rc;
+	CLASS(rwsem_read_intr_acquire, rwsem)(&cxl_region_rwsem);
+	if (IS_ERR(rwsem))
+		return PTR_ERR(rwsem);
+
 	if (p->res)
 		resource = p->res->start;
-	rc = sysfs_emit(buf, "%#llx\n", resource);
-	up_read(&cxl_region_rwsem);
-
-	return rc;
+	return sysfs_emit(buf, "%#llx\n", resource);
 }
 static DEVICE_ATTR_RO(resource);
 
@@ -664,7 +626,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
 	struct resource *res;
 	u64 remainder = 0;
 
-	lockdep_assert_held_write(&cxl_region_rwsem);
+	lockdep_assert_held_write(&cxl_region_rwsem.rw_semaphore);
 
 	/* Nothing to do... */
 	if (p->res && resource_size(p->res) == size)
@@ -706,7 +668,7 @@ static void cxl_region_iomem_release(struct cxl_region *cxlr)
 	struct cxl_region_params *p = &cxlr->params;
 
 	if (device_is_registered(&cxlr->dev))
-		lockdep_assert_held_write(&cxl_region_rwsem);
+		lockdep_assert_held_write(&cxl_region_rwsem.rw_semaphore);
 	if (p->res) {
 		/*
 		 * Autodiscovered regions may not have been able to insert their
@@ -723,7 +685,7 @@ static int free_hpa(struct cxl_region *cxlr)
 {
 	struct cxl_region_params *p = &cxlr->params;
 
-	lockdep_assert_held_write(&cxl_region_rwsem);
+	lockdep_assert_held_write(&cxl_region_rwsem.rw_semaphore);
 
 	if (!p->res)
 		return 0;
@@ -747,15 +709,14 @@ static ssize_t size_store(struct device *dev, struct device_attribute *attr,
 	if (rc)
 		return rc;
 
-	rc = down_write_killable(&cxl_region_rwsem);
-	if (rc)
-		return rc;
+	CLASS(rwsem_write_kill_acquire, rwsem)(&cxl_region_rwsem);
+	if (IS_ERR(rwsem))
+		return PTR_ERR(rwsem);
 
 	if (val)
 		rc = alloc_hpa(cxlr, val);
 	else
 		rc = free_hpa(cxlr);
-	up_write(&cxl_region_rwsem);
 
 	if (rc)
 		return rc;
@@ -769,17 +730,13 @@ static ssize_t size_show(struct device *dev, struct device_attribute *attr,
 	struct cxl_region *cxlr = to_cxl_region(dev);
 	struct cxl_region_params *p = &cxlr->params;
 	u64 size = 0;
-	ssize_t rc;
 
-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
-		return rc;
+	CLASS(rwsem_read_intr_acquire, rwsem)(&cxl_region_rwsem);
+	if (IS_ERR(rwsem))
+		return PTR_ERR(rwsem);
 	if (p->res)
 		size = resource_size(p->res);
-	rc = sysfs_emit(buf, "%#llx\n", size);
-	up_read(&cxl_region_rwsem);
-
-	return rc;
+	return sysfs_emit(buf, "%#llx\n", size);
 }
 static DEVICE_ATTR_RW(size);
 
@@ -803,28 +760,21 @@ static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
 {
 	struct cxl_region_params *p = &cxlr->params;
 	struct cxl_endpoint_decoder *cxled;
-	int rc;
 
-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
-		return rc;
+	CLASS(rwsem_read_intr_acquire, rwsem)(&cxl_region_rwsem);
+	if (IS_ERR(rwsem))
+		return PTR_ERR(rwsem);
 
 	if (pos >= p->interleave_ways) {
 		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
 			p->interleave_ways);
-		rc = -ENXIO;
-		goto out;
+		return -ENXIO;
 	}
 
 	cxled = p->targets[pos];
 	if (!cxled)
-		rc = sysfs_emit(buf, "\n");
-	else
-		rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
-out:
-	up_read(&cxl_region_rwsem);
-
-	return rc;
+		return sysfs_emit(buf, "\n");
+	return sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
 }
 
 static int check_commit_order(struct device *dev, void *data)
@@ -1127,7 +1077,7 @@ static int cxl_port_attach_region(struct cxl_port *port,
 	unsigned long index;
 	int rc = -EBUSY;
 
-	lockdep_assert_held_write(&cxl_region_rwsem);
+	lockdep_assert_held_write(&cxl_region_rwsem.rw_semaphore);
 
 	cxl_rr = cxl_rr_load(port, cxlr);
 	if (cxl_rr) {
@@ -1228,7 +1178,7 @@ static void cxl_port_detach_region(struct cxl_port *port,
 	struct cxl_region_ref *cxl_rr;
 	struct cxl_ep *ep = NULL;
 
-	lockdep_assert_held_write(&cxl_region_rwsem);
+	lockdep_assert_held_write(&cxl_region_rwsem.rw_semaphore);
 
 	cxl_rr = cxl_rr_load(port, cxlr);
 	if (!cxl_rr)
@@ -2137,7 +2087,7 @@ struct cxl_region *cxl_decoder_detach(struct cxl_region *cxlr,
 {
 	struct cxl_region_params *p;
 
-	lockdep_assert_held_write(&cxl_region_rwsem);
+	lockdep_assert_held_write(&cxl_region_rwsem.rw_semaphore);
 
 	if (!cxled) {
 		p = &cxlr->params;
@@ -2199,20 +2149,17 @@ static int attach_target(struct cxl_region *cxlr,
 			 struct cxl_endpoint_decoder *cxled, int pos,
 			 unsigned int state)
 {
-	int rc = 0;
-
-	if (state == TASK_INTERRUPTIBLE)
-		rc = down_write_killable(&cxl_region_rwsem);
-	else
-		down_write(&cxl_region_rwsem);
-	if (rc)
-		return rc;
+	if (state == TASK_INTERRUPTIBLE) {
+		CLASS(rwsem_write_kill_acquire, rwsem)(&cxl_region_rwsem);
+		if (IS_ERR(rwsem))
+			return PTR_ERR(rwsem);
+		guard(rwsem_read_acquire)(&cxl_dpa_rwsem);
+		return cxl_region_attach(cxlr, cxled, pos);
+	}
 
-	down_read(&cxl_dpa_rwsem);
-	rc = cxl_region_attach(cxlr, cxled, pos);
-	up_read(&cxl_dpa_rwsem);
-	up_write(&cxl_region_rwsem);
-	return rc;
+	guard(rwsem_write_acquire)(&cxl_region_rwsem);
+	guard(rwsem_read_acquire)(&cxl_dpa_rwsem);
+	return cxl_region_attach(cxlr, cxled, pos);
 }
 
 static int detach_target(struct cxl_region *cxlr, int pos)
@@ -2644,19 +2591,14 @@ static ssize_t region_show(struct device *dev, struct device_attribute *attr,
 			   char *buf)
 {
 	struct cxl_decoder *cxld = to_cxl_decoder(dev);
-	ssize_t rc;
 
-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc)
-		return rc;
+	CLASS(rwsem_read_intr_acquire, rwsem)(&cxl_region_rwsem);
+	if (IS_ERR(rwsem))
+		return PTR_ERR(rwsem);
 
 	if (cxld->region)
-		rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
-	else
-		rc = sysfs_emit(buf, "\n");
-	up_read(&cxl_region_rwsem);
-
-	return rc;
+		return sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
+	return sysfs_emit(buf, "\n");
 }
 DEVICE_ATTR_RO(region);
 
@@ -2995,7 +2937,7 @@ static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
 	struct device *dev;
 	int i;
 
-	guard(rwsem_read)(&cxl_region_rwsem);
+	guard(rwsem_read_acquire)(&cxl_region_rwsem);
 	if (p->state != CXL_CONFIG_COMMIT)
 		return -ENXIO;
 
@@ -3084,7 +3026,7 @@ static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
 	struct cxl_dax_region *cxlr_dax;
 	struct device *dev;
 
-	guard(rwsem_read)(&cxl_region_rwsem);
+	guard(rwsem_read_acquire)(&cxl_region_rwsem);
 	if (p->state != CXL_CONFIG_COMMIT)
 		return ERR_PTR(-ENXIO);
 
@@ -3255,7 +3197,7 @@ static int match_region_by_range(struct device *dev, const void *data)
 	cxlr = to_cxl_region(dev);
 	p = &cxlr->params;
 
-	guard(rwsem_read)(&cxl_region_rwsem);
+	guard(rwsem_read_acquire)(&cxl_region_rwsem);
 	if (p->res && p->res->start == r->start && p->res->end == r->end)
 		return 1;
 
@@ -3315,7 +3257,7 @@ static int __construct_region(struct cxl_region *cxlr,
 	struct resource *res;
 	int rc;
 
-	guard(rwsem_write)(&cxl_region_rwsem);
+	guard(rwsem_write_acquire)(&cxl_region_rwsem);
 	p = &cxlr->params;
 	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
 		dev_err(cxlmd->dev.parent,
@@ -3453,10 +3395,10 @@ int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
 
 	attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);
 
-	down_read(&cxl_region_rwsem);
-	p = &cxlr->params;
-	attach = p->state == CXL_CONFIG_COMMIT;
-	up_read(&cxl_region_rwsem);
+	scoped_guard(rwsem_read_acquire, &cxl_region_rwsem) {
+		p = &cxlr->params;
+		attach = p->state == CXL_CONFIG_COMMIT;
+	}
 
 	if (attach) {
 		/*
@@ -3484,7 +3426,7 @@ u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa)
 	if (!endpoint)
 		return ~0ULL;
 
-	guard(rwsem_write)(&cxl_region_rwsem);
+	guard(rwsem_write_acquire)(&cxl_region_rwsem);
 
 	xa_for_each(&endpoint->regions, index, iter) {
 		struct cxl_region_params *p = &iter->region->params;
@@ -3524,32 +3466,24 @@ static void shutdown_notifiers(void *_cxlr)
 static int cxl_region_can_probe(struct cxl_region *cxlr)
 {
 	struct cxl_region_params *p = &cxlr->params;
-	int rc;
 
-	rc = down_read_interruptible(&cxl_region_rwsem);
-	if (rc) {
+	CLASS(rwsem_read_intr_acquire, rwsem)(&cxl_region_rwsem);
+	if (IS_ERR(rwsem)) {
 		dev_dbg(&cxlr->dev, "probe interrupted\n");
-		return rc;
+		return PTR_ERR(rwsem);
 	}
 
 	if (p->state < CXL_CONFIG_COMMIT) {
 		dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
-		rc = -ENXIO;
-		goto out;
+		return -ENXIO;
 	}
 
 	if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
 		dev_err(&cxlr->dev,
 			"failed to activate, re-commit region and retry\n");
-		rc = -ENXIO;
-		goto out;
+		return -ENXIO;
 	}
 
-out:
-	up_read(&cxl_region_rwsem);
-
-	if (rc)
-		return rc;
 	return 0;
 }
 
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index c8b543d428b0..4c44a50d47d6 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -242,9 +242,46 @@ DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
 DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
 DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)
 
+struct rw_semaphore_acquire {
+	struct rw_semaphore rw_semaphore;
+};
+
+#define DECLARE_RWSEM_ACQUIRE(name)                               \
+	struct rw_semaphore_acquire name = { __RWSEM_INITIALIZER( \
+		name.rw_semaphore) }
+
+DEFINE_GUARD(rwsem_read_acquire, struct rw_semaphore_acquire *,
+	     down_read(&_T->rw_semaphore), up_read(&_T->rw_semaphore))
+DEFINE_GUARD(rwsem_write_acquire, struct rw_semaphore_acquire *,
+	     down_write(&_T->rw_semaphore), up_write(&_T->rw_semaphore))
+DEFINE_ACQUIRE(rwsem_read_intr_acquire, rw_semaphore, up_read,
+	       down_read_interruptible)
+DEFINE_ACQUIRE(rwsem_write_kill_acquire, rw_semaphore, up_write,
+	       down_write_killable)
+
+static inline int down_read_try_or_busy(struct rw_semaphore *rwsem)
+{
+	int ret[] = { -EBUSY, 0 };
+
+	return ret[down_read_trylock(rwsem)];
+}
+
+DEFINE_ACQUIRE(rwsem_read_try_acquire, rw_semaphore, up_read,
+	       down_read_try_or_busy)
+
 DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
 DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
 
+static inline int down_write_try_or_busy(struct rw_semaphore *rwsem)
+{
+	int ret[] = { -EBUSY, 0 };
+
+	return ret[down_write_trylock(rwsem)];
+}
+
+DEFINE_ACQUIRE(rwsem_write_try_acquire, rw_semaphore, up_write,
+	       down_write_try_or_busy)
+
 /*
  * downgrade write lock to read lock
  */
-- 
2.49.0

