[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260127135917.1597762-3-corey@minyard.net>
Date: Tue, 27 Jan 2026 07:54:40 -0600
From: Corey Minyard <corey@...yard.net>
To: Breno Leitao <leitao@...ian.org>,
Nathan Chancellor <nathan@...nel.org>,
Nick Desaulniers <nick.desaulniers+lkml@...il.com>,
Bill Wendling <morbo@...gle.com>,
Justin Stitt <justinstitt@...gle.com>
Cc: openipmi-developer@...ts.sourceforge.net,
linux-kernel@...r.kernel.org,
llvm@...ts.linux.dev,
kernel-team@...a.com,
Corey Minyard <corey@...yard.net>
Subject: [PATCH 2/2] ipmi: Consolidate the run to completion checking for xmit msgs lock
The run to completion checking for the xmit msgs lock made the code hard to read; move the check into helper functions.
Signed-off-by: Corey Minyard <corey@...yard.net>
---
drivers/char/ipmi/ipmi_msghandler.c | 40 ++++++++++++++++-------------
1 file changed, 22 insertions(+), 18 deletions(-)
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index a590a67294e2..030828cdb778 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -602,6 +602,20 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
static int __scan_channels(struct ipmi_smi *intf,
struct ipmi_device_id *id, bool rescan);
+static void ipmi_lock_xmit_msgs(struct ipmi_smi *intf, int run_to_completion,
+ unsigned long *flags)
+{
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->xmit_msgs_lock, *flags);
+}
+
+static void ipmi_unlock_xmit_msgs(struct ipmi_smi *intf, int run_to_completion,
+ unsigned long *flags)
+{
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->xmit_msgs_lock, *flags);
+}
+
static void free_ipmi_user(struct kref *ref)
{
struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
@@ -1878,11 +1892,9 @@ static void smi_send(struct ipmi_smi *intf,
int run_to_completion = READ_ONCE(intf->run_to_completion);
unsigned long flags = 0;
- if (!run_to_completion)
- spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+ ipmi_lock_xmit_msgs(intf, run_to_completion, &flags);
smi_msg = smi_add_send_msg(intf, smi_msg, priority);
- if (!run_to_completion)
- spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+ ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags);
if (smi_msg)
handlers->sender(intf->send_info, smi_msg);
@@ -4826,8 +4838,7 @@ static void smi_work(struct work_struct *t)
* message delivery.
*/
restart:
- if (!run_to_completion)
- spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+ ipmi_lock_xmit_msgs(intf, run_to_completion, &flags);
if (intf->curr_msg == NULL && !intf->in_shutdown) {
struct list_head *entry = NULL;
@@ -4843,8 +4854,7 @@ static void smi_work(struct work_struct *t)
intf->curr_msg = newmsg;
}
}
- if (!run_to_completion)
- spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+ ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags);
if (newmsg) {
cc = intf->handlers->sender(intf->send_info, newmsg);
@@ -4852,13 +4862,9 @@ static void smi_work(struct work_struct *t)
if (newmsg->recv_msg)
deliver_err_response(intf,
newmsg->recv_msg, cc);
- if (!run_to_completion)
- spin_lock_irqsave(&intf->xmit_msgs_lock,
- flags);
+ ipmi_lock_xmit_msgs(intf, run_to_completion, &flags);
intf->curr_msg = NULL;
- if (!run_to_completion)
- spin_unlock_irqrestore(&intf->xmit_msgs_lock,
- flags);
+ ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags);
ipmi_free_smi_msg(newmsg);
newmsg = NULL;
goto restart;
@@ -4928,16 +4934,14 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
flags);
- if (!run_to_completion)
- spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+ ipmi_lock_xmit_msgs(intf, run_to_completion, &flags);
/*
* We can get an asynchronous event or receive message in addition
* to commands we send.
*/
if (msg == intf->curr_msg)
intf->curr_msg = NULL;
- if (!run_to_completion)
- spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+ ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags);
if (run_to_completion)
smi_work(&intf->smi_work);
--
2.43.0
Powered by blists - more mailing lists