Message-ID: <1645700699-82369-3-git-send-email-john.garry@huawei.com>
Date: Thu, 24 Feb 2022 19:04:59 +0800
From: John Garry <john.garry@...wei.com>
To: <jejb@...ux.ibm.com>, <martin.petersen@...cle.com>
CC: <chenxiang66@...ilicon.com>, <linux-scsi@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <linuxarm@...wei.com>,
<damien.lemoal@...nsource.wdc.com>,
John Garry <john.garry@...wei.com>
Subject: [PATCH 2/2] scsi: libsas: Use bool for queue_work() return code
Function queue_work() returns a bool, so use a bool to hold its return
value, which makes the code a little clearer.
Signed-off-by: John Garry <john.garry@...wei.com>
---
drivers/scsi/libsas/sas_event.c | 23 +++++++++--------------
drivers/scsi/libsas/sas_internal.h | 2 +-
2 files changed, 10 insertions(+), 15 deletions(-)
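Note (illustrative only, not part of the patch): the change follows the usual
pattern of treating a queue_work()-style result as a plain bool and letting
the caller clean up on failure. A minimal standalone C sketch of that pattern
is below; names such as fake_queue_work() and struct fake_event are invented
for the example and do not exist in libsas.

/*
 * Standalone sketch (not kernel code): a queueing helper reports
 * success/failure as bool, and the caller tests the result directly
 * instead of comparing an int against 1.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_event {
	int id;
};

/* Stands in for a "host registered" state bit such as SAS_HA_REGISTERED. */
static bool registered = true;

/* Mirrors sas_queue_work(): true if the work was queued, false otherwise. */
static bool fake_queue_work(struct fake_event *ev)
{
	if (!registered)
		return false;
	printf("queued event %d\n", ev->id);
	return true;
}

int main(void)
{
	struct fake_event *ev = malloc(sizeof(*ev));

	if (!ev)
		return 1;
	ev->id = 42;

	/* Caller frees the event on failure, as sas_notify_*_event() does. */
	if (!fake_queue_work(ev)) {
		free(ev);
		return 1;
	}

	free(ev);
	return 0;
}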
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 8ff58fd97837..e5eb24100e2d 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -10,13 +10,13 @@
#include <scsi/scsi_host.h>
#include "sas_internal.h"
-int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
+bool sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
/* it's added to the defer_q when draining so return succeed */
- int rc = 1;
+ bool rc = true;
if (!test_bit(SAS_HA_REGISTERED, &ha->state))
- return 0;
+ return false;
if (test_bit(SAS_HA_DRAINING, &ha->state)) {
/* add it to the defer list, if not already pending */
@@ -28,11 +28,11 @@ int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
return rc;
}
-static int sas_queue_event(int event, struct sas_work *work,
+static bool sas_queue_event(int event, struct sas_work *work,
struct sas_ha_struct *ha)
{
unsigned long flags;
- int rc;
+ bool rc;
spin_lock_irqsave(&ha->lock, flags);
rc = sas_queue_work(ha, work);
@@ -44,13 +44,12 @@ static int sas_queue_event(int event, struct sas_work *work,
void sas_queue_deferred_work(struct sas_ha_struct *ha)
{
struct sas_work *sw, *_sw;
- int ret;
spin_lock_irq(&ha->lock);
list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
list_del_init(&sw->drain_node);
- ret = sas_queue_work(ha, sw);
- if (ret != 1) {
+
+ if (sas_queue_work(ha, sw) == false) {
pm_runtime_put(ha->dev);
sas_free_event(to_asd_sas_event(&sw->work));
}
@@ -170,7 +169,6 @@ void sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
{
struct sas_ha_struct *ha = phy->ha;
struct asd_sas_event *ev;
- int ret;
BUG_ON(event >= PORT_NUM_EVENTS);
@@ -186,8 +184,7 @@ void sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
if (sas_defer_event(phy, ev))
return;
- ret = sas_queue_event(event, &ev->work, ha);
- if (ret != 1) {
+ if (sas_queue_event(event, &ev->work, ha) == false) {
pm_runtime_put(ha->dev);
sas_free_event(ev);
}
@@ -199,7 +196,6 @@ void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
{
struct sas_ha_struct *ha = phy->ha;
struct asd_sas_event *ev;
- int ret;
BUG_ON(event >= PHY_NUM_EVENTS);
@@ -215,8 +211,7 @@ void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
if (sas_defer_event(phy, ev))
return;
- ret = sas_queue_event(event, &ev->work, ha);
- if (ret != 1) {
+ if (sas_queue_event(event, &ev->work, ha) == false) {
pm_runtime_put(ha->dev);
sas_free_event(ev);
}
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 24843db2cb65..13d0ffaada93 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -67,7 +67,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work);
void sas_porte_link_reset_err(struct work_struct *work);
void sas_porte_timer_event(struct work_struct *work);
void sas_porte_hard_reset(struct work_struct *work);
-int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
+bool sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
int sas_notify_lldd_dev_found(struct domain_device *);
void sas_notify_lldd_dev_gone(struct domain_device *);
--
2.26.2