Message-Id: <20240327160314.9982-7-apais@linux.microsoft.com>
Date: Wed, 27 Mar 2024 16:03:11 +0000
From: Allen Pais <apais@...ux.microsoft.com>
To: linux-kernel@...r.kernel.org
Cc: tj@...nel.org,
keescook@...omium.org,
vkoul@...nel.org,
marcan@...can.st,
sven@...npeter.dev,
florian.fainelli@...adcom.com,
rjui@...adcom.com,
sbranden@...adcom.com,
paul@...pouillou.net,
Eugeniy.Paltsev@...opsys.com,
manivannan.sadhasivam@...aro.org,
vireshk@...nel.org,
Frank.Li@....com,
leoyang.li@....com,
zw@...kernel.org,
wangzhou1@...ilicon.com,
haijie1@...wei.com,
shawnguo@...nel.org,
s.hauer@...gutronix.de,
sean.wang@...iatek.com,
matthias.bgg@...il.com,
angelogioacchino.delregno@...labora.com,
afaerber@...e.de,
logang@...tatee.com,
daniel@...que.org,
haojian.zhuang@...il.com,
robert.jarzmik@...e.fr,
andersson@...nel.org,
konrad.dybcio@...aro.org,
orsonzhai@...il.com,
baolin.wang@...ux.alibaba.com,
zhang.lyra@...il.com,
patrice.chotard@...s.st.com,
linus.walleij@...aro.org,
wens@...e.org,
jernej.skrabec@...il.com,
peter.ujfalusi@...il.com,
kys@...rosoft.com,
haiyangz@...rosoft.com,
wei.liu@...nel.org,
decui@...rosoft.com,
jassisinghbrar@...il.com,
mchehab@...nel.org,
maintainers@...echerrydvr.com,
aubin.constans@...rochip.com,
ulf.hansson@...aro.org,
manuel.lauss@...il.com,
mirq-linux@...e.qmqm.pl,
jh80.chung@...sung.com,
oakad@...oo.com,
hayashi.kunihiko@...ionext.com,
mhiramat@...nel.org,
brucechang@....com.tw,
HaraldWelte@...tech.com,
pierre@...man.eu,
duncan.sands@...e.fr,
stern@...land.harvard.edu,
oneukum@...e.com,
openipmi-developer@...ts.sourceforge.net,
dmaengine@...r.kernel.org,
asahi@...ts.linux.dev,
linux-arm-kernel@...ts.infradead.org,
linux-rpi-kernel@...ts.infradead.org,
linux-mips@...r.kernel.org,
imx@...ts.linux.dev,
linuxppc-dev@...ts.ozlabs.org,
linux-mediatek@...ts.infradead.org,
linux-actions@...ts.infradead.org,
linux-arm-msm@...r.kernel.org,
linux-riscv@...ts.infradead.org,
linux-sunxi@...ts.linux.dev,
linux-tegra@...r.kernel.org,
linux-hyperv@...r.kernel.org,
linux-rdma@...r.kernel.org,
linux-media@...r.kernel.org,
linux-mmc@...r.kernel.org,
linux-omap@...r.kernel.org,
linux-renesas-soc@...r.kernel.org,
linux-s390@...r.kernel.org,
netdev@...r.kernel.org,
linux-usb@...r.kernel.org
Subject: [PATCH 6/9] ipmi: Convert from tasklet to BH workqueue
The only generic interface to execute asynchronously in the BH context is
tasklet; however, it's marked deprecated and has some design flaws. To
replace tasklets, BH workqueue support was recently added. A BH workqueue
behaves similarly to regular workqueues except that the queued work items
are executed in the BH context.
This patch converts the IPMI message handler (drivers/char/ipmi) from tasklet to BH workqueue.
Based on the work done by Tejun Heo <tj@...nel.org>
Branch: https://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git for-6.10
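For reviewers unfamiliar with the new API, the conversion pattern is sketched below.
This is a minimal, hypothetical driver-context example (not taken from this patch),
assuming the BH workqueue interface (system_bh_wq, from_work()) from the wq.git
for-6.10 branch referenced above:

	#include <linux/workqueue.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct demo_intf {
		spinlock_t lock;
		struct list_head msgs;
		struct work_struct recv_work;	/* was: struct tasklet_struct recv_tasklet */
	};

	/* was: static void demo_recv_tasklet(struct tasklet_struct *t) */
	static void demo_recv_work(struct work_struct *t)
	{
		/* was: from_tasklet(intf, t, recv_tasklet) */
		struct demo_intf *intf = from_work(intf, t, recv_work);

		/* drain intf->msgs here; still executes in BH (softirq) context */
	}

	static void demo_init(struct demo_intf *intf)
	{
		spin_lock_init(&intf->lock);
		INIT_LIST_HEAD(&intf->msgs);
		/* was: tasklet_setup(&intf->recv_tasklet, demo_recv_tasklet) */
		INIT_WORK(&intf->recv_work, demo_recv_work);
	}

	static void demo_msg_received(struct demo_intf *intf)
	{
		/* was: tasklet_schedule(&intf->recv_tasklet) */
		queue_work(system_bh_wq, &intf->recv_work);
	}

	static void demo_teardown(struct demo_intf *intf)
	{
		/* was: tasklet_kill(&intf->recv_tasklet) */
		cancel_work_sync(&intf->recv_work);
	}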
Signed-off-by: Allen Pais <allen.lkml@...il.com>
---
drivers/char/ipmi/ipmi_msghandler.c | 30 ++++++++++++++---------------
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index b0eedc4595b3..fce2a2dbdc82 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -36,12 +36,13 @@
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
+#include <linux/workqueue.h>
#define IPMI_DRIVER_VERSION "39.2"
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
-static void smi_recv_tasklet(struct tasklet_struct *t);
+static void smi_recv_work(struct work_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
@@ -498,13 +499,13 @@ struct ipmi_smi {
/*
* Messages queued for delivery. If delivery fails (out of memory
* for instance), They will stay in here to be processed later in a
- * periodic timer interrupt. The tasklet is for handling received
+ * periodic timer interrupt. The work is for handling received
* messages directly from the handler.
*/
spinlock_t waiting_rcv_msgs_lock;
struct list_head waiting_rcv_msgs;
atomic_t watchdog_pretimeouts_to_deliver;
- struct tasklet_struct recv_tasklet;
+ struct work_struct recv_work;
spinlock_t xmit_msgs_lock;
struct list_head xmit_msgs;
@@ -704,7 +705,7 @@ static void clean_up_interface_data(struct ipmi_smi *intf)
struct cmd_rcvr *rcvr, *rcvr2;
struct list_head list;
- tasklet_kill(&intf->recv_tasklet);
+ cancel_work_sync(&intf->recv_work);
free_smi_msg_list(&intf->waiting_rcv_msgs);
free_recv_msg_list(&intf->waiting_events);
@@ -1319,7 +1320,7 @@ static void free_user(struct kref *ref)
{
struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
- /* SRCU cleanup must happen in task context. */
+ /* SRCU cleanup must happen in work context. */
queue_work(remove_work_wq, &user->remove_work);
}
@@ -3605,8 +3606,7 @@ int ipmi_add_smi(struct module *owner,
intf->curr_seq = 0;
spin_lock_init(&intf->waiting_rcv_msgs_lock);
INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
- tasklet_setup(&intf->recv_tasklet,
- smi_recv_tasklet);
+ INIT_WORK(&intf->recv_work, smi_recv_work);
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
spin_lock_init(&intf->xmit_msgs_lock);
INIT_LIST_HEAD(&intf->xmit_msgs);
@@ -4779,7 +4779,7 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
* To preserve message order, quit if we
* can't handle a message. Add the message
* back at the head, this is safe because this
- * tasklet is the only thing that pulls the
+ * work is the only thing that pulls the
* messages.
*/
list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
@@ -4812,10 +4812,10 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
}
}
-static void smi_recv_tasklet(struct tasklet_struct *t)
+static void smi_recv_work(struct work_struct *t)
{
unsigned long flags = 0; /* keep us warning-free. */
- struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
+ struct ipmi_smi *intf = from_work(intf, t, recv_work);
int run_to_completion = intf->run_to_completion;
struct ipmi_smi_msg *newmsg = NULL;
@@ -4866,7 +4866,7 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
/*
* To preserve message order, we keep a queue and deliver from
- * a tasklet.
+ * a work item.
*/
if (!run_to_completion)
spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
@@ -4887,9 +4887,9 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
if (run_to_completion)
- smi_recv_tasklet(&intf->recv_tasklet);
+ smi_recv_work(&intf->recv_work);
else
- tasklet_schedule(&intf->recv_tasklet);
+ queue_work(system_bh_wq, &intf->recv_work);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
@@ -4899,7 +4899,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
return;
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
- tasklet_schedule(&intf->recv_tasklet);
+ queue_work(system_bh_wq, &intf->recv_work);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
@@ -5068,7 +5068,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
flags);
}
- tasklet_schedule(&intf->recv_tasklet);
+ queue_work(system_bh_wq, &intf->recv_work);
return need_timer;
}
--
2.17.1