Message-ID: <20250908012108.514698-8-o-takashi@sakamocchi.jp>
Date: Mon,  8 Sep 2025 10:21:04 +0900
From: Takashi Sakamoto <o-takashi@...amocchi.jp>
To: linux1394-devel@...ts.sourceforge.net
Cc: linux-kernel@...r.kernel.org
Subject: [PATCH 07/11] firewire: core: code refactoring to evaluate transaction result to CSR_BUS_MANAGER_ID

Most of the work in bm_work should be done while holding the spin lock
of fw_card. For the asynchronous transaction, however, the lock has to
be released temporarily, since the function waits for the transaction
event.
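
The shape of this pattern is roughly as follows (a minimal sketch with
simplified names; run_transaction() is a hypothetical stand-in for the
asynchronous transaction, not the actual bm_work code):

	spin_lock_irq(&card->lock);
	/* ... inspect and update card state under the lock ... */
	spin_unlock_irq(&card->lock);

	/* The transaction sleeps until the completion event arrives,
	 * so the lock cannot be held across it. */
	rcode = run_transaction(card);	/* hypothetical helper */

	spin_lock_irq(&card->lock);
	/* ... re-validate state such as card->generation, which may
	 * have changed while the lock was dropped ... */
	spin_unlock_irq(&card->lock);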

Commit 27310d561622 ("firewire: core: use guard macro to maintain
properties of fw_card") applied scoped_guard() to the bm_work function;
however, the resulting control flow is hard to follow.

This commit refactors the code so that the spin lock is acquired once
after the transaction and released explicitly on each path that returns
early, as the sketch below illustrates.
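
For reference, the shape of the change is roughly the following (a
simplified sketch of the pattern, not the exact hunks below):

	/* Before: scoped_guard() releases the lock when the braced
	 * scope ends, so the result of the transaction has to be
	 * evaluated both inside and outside the scope. */
	scoped_guard(spinlock_irq, &card->lock) {
		if (rcode == RCODE_COMPLETE && generation == card->generation)
			card->bm_node_id = bm_id;	/* simplified */
	}
	if (rcode == RCODE_COMPLETE && bm_id != 0x3f)
		return;	/* the guard already released the lock */

	/* After: the lock is acquired once and released explicitly on
	 * the path that leaves early, so each condition is tested once. */
	spin_lock_irq(&card->lock);
	if (rcode == RCODE_COMPLETE) {
		if (generation == card->generation)
			card->bm_node_id = bm_id;	/* simplified */
		if (bm_id != 0x3f) {
			spin_unlock_irq(&card->lock);
			return;
		}
	}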

Signed-off-by: Takashi Sakamoto <o-takashi@...amocchi.jp>
---
 drivers/firewire/core-card.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index b98797e4f1d4..e1a7a151b109 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -291,7 +291,7 @@ static void bm_work(struct work_struct *work)
 	struct fw_card *card __free(card_unref) = from_work(card, work, bm_work.work);
 	struct fw_device *root_device, *irm_device;
 	struct fw_node *root_node __free(node_unref) = NULL;
-	int root_id, new_root_id, irm_id, bm_id, local_id;
+	int root_id, new_root_id, irm_id, local_id;
 	int gap_count, generation, grace;
 	bool do_reset = false;
 	bool root_device_is_running;
@@ -376,19 +376,22 @@ static void bm_work(struct work_struct *work)
 		if (rcode == RCODE_GENERATION)
 			return;
 
-		bm_id = be32_to_cpu(data[0]);
+		spin_lock_irq(&card->lock);
 
-		scoped_guard(spinlock_irq, &card->lock) {
-			if (rcode == RCODE_COMPLETE && generation == card->generation)
-				card->bm_node_id =
-				    bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
-		}
+		if (rcode == RCODE_COMPLETE) {
+			int bm_id = be32_to_cpu(data[0]);
 
-		if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
-			/* Somebody else is BM.  Only act as IRM. */
-			if (local_id == irm_id)
-				allocate_broadcast_channel(card, generation);
-			return;
+			if (generation == card->generation)
+				card->bm_node_id = bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
+
+			if (bm_id != 0x3f) {
+				spin_unlock_irq(&card->lock);
+
+				// Somebody else is BM.  Only act as IRM.
+				if (local_id == irm_id)
+					allocate_broadcast_channel(card, generation);
+				return;
+			}
 		}
 
 		if (rcode == RCODE_SEND_ERROR) {
@@ -397,12 +400,11 @@ static void bm_work(struct work_struct *work)
 			 * some local problem.  Let's try again later and hope
 			 * that the problem has gone away by then.
 			 */
+			spin_unlock_irq(&card->lock);
 			fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
 			return;
 		}
 
-		spin_lock_irq(&card->lock);
-
 		if (rcode != RCODE_COMPLETE && !keep_this_irm) {
 			/*
 			 * The lock request failed, maybe the IRM
-- 
2.48.1

