Message-ID: <20150727055516.4759.74800.stgit@softrs>
Date:	Mon, 27 Jul 2015 14:55:16 +0900
From:	Hidehiro Kawai <hidehiro.kawai.ez@...achi.com>
To:	Corey Minyard <minyard@....org>
Cc:	openipmi-developer@...ts.sourceforge.net,
	linux-kernel@...r.kernel.org
Subject: [PATCH 3/7] ipmi: Don't flush messages in sender() in
 run-to-completion mode

When flushing queued messages in run-to-completion mode,
smi_event_handler() is called recursively:

flush_messages()
 smi_event_handler()
  handle_transaction_done()
   deliver_recv_msg()
    ipmi_smi_msg_received()
     smi_recv_tasklet()
      sender()
       flush_messages()
        smi_event_handler()
         ...

The depth of the recursion grows with the number of queued
messages, so a long enough queue can overflow the stack.
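
As a standalone illustration (a hypothetical userspace sketch, not
kernel code; all names are stand-ins for the real call chain), each
queued message adds one stack frame:

	#include <stdio.h>

	static int queued = 5;	/* imagine thousands in practice */
	static int depth, max_depth;

	static void flush_messages(void);

	static void sender(void)
	{
		/* old behavior: in run-to-completion mode, sender()
		 * flushed the queue itself, re-entering
		 * flush_messages() */
		flush_messages();
	}

	static void flush_messages(void)
	{
		if (queued == 0)
			return;
		queued--;
		depth++;
		if (depth > max_depth)
			max_depth = depth;
		sender();	/* handling one message sends the next */
		depth--;
	}

	int main(void)
	{
		flush_messages();
		printf("max recursion depth: %d\n", max_depth); /* 5 */
		return 0;
	}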

To solve this problem, remove the flush_messages() call from
sender() in ipmi_si_intf.c and instead call flush_messages() from
the callers of sender() where needed.  To implement this, add a new
flush_messages handler to struct ipmi_smi_handlers.
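
The resulting caller-side pattern (condensed from the
ipmi_msghandler.c hunk below; the handler is optional, so callers
must check for it before calling) is:

	if (intf->handlers->flush_messages)
		intf->handlers->flush_messages(intf->send_info);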

Signed-off-by: Hidehiro Kawai <hidehiro.kawai.ez@...achi.com>
---
 drivers/char/ipmi/ipmi_msghandler.c |    3 +++
 drivers/char/ipmi/ipmi_si_intf.c    |    5 +++--
 include/linux/ipmi_smi.h            |    5 +++++
 3 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index a6e6ec0..f1ecd25 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -4291,6 +4291,9 @@ static void ipmi_panic_request_and_wait(ipmi_smi_t           intf,
 			    0, 1); /* Don't retry, and don't wait. */
 	if (rv)
 		atomic_sub(2, &panic_done_count);
+	else if (intf->handlers->flush_messages)
+		intf->handlers->flush_messages(intf->send_info);
+
 	while (atomic_read(&panic_done_count) != 0)
 		ipmi_poll(intf);
 }
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 660e53b..814b7b7 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -928,8 +928,9 @@ static void check_start_timer_thread(struct smi_info *smi_info)
 	}
 }
 
-static void flush_messages(struct smi_info *smi_info)
+static void flush_messages(void *send_info)
 {
+	struct smi_info *smi_info = send_info;
 	enum si_sm_result result;
 
 	/*
@@ -958,7 +959,6 @@ static void sender(void                *send_info,
 		 */
 		smi_info->waiting_msg = msg;
 
-		flush_messages(smi_info);
 		return;
 	}
 
@@ -1264,6 +1264,7 @@ static void set_maintenance_mode(void *send_info, bool enable)
 	.set_need_watch		= set_need_watch,
 	.set_maintenance_mode   = set_maintenance_mode,
 	.set_run_to_completion  = set_run_to_completion,
+	.flush_messages		= flush_messages,
 	.poll			= poll,
 };
 
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 0b1e569..ba57fb1 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -115,6 +115,11 @@ struct ipmi_smi_handlers {
 	   implement it. */
 	void (*set_need_watch)(void *send_info, bool enable);
 
+	/*
+	 * Called to flush all pending messages.
+	 */
+	void (*flush_messages)(void *send_info);
+
 	/* Called when the interface should go into "run to
 	   completion" mode.  If this call sets the value to true, the
 	   interface should make sure that all messages are flushed

