Message-ID: <8a60c7bd72f908a637f0f9e60cce7ab3a047e343.camel@HansenPartnership.com>
Date: Fri, 06 Nov 2020 14:26:05 -0800
From: James Bottomley <James.Bottomley@...senPartnership.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
	Linus Torvalds <torvalds@...ux-foundation.org>
Cc: linux-scsi <linux-scsi@...r.kernel.org>,
	linux-kernel <linux-kernel@...r.kernel.org>
Subject: [GIT PULL] SCSI fixes for 5.10-rc2

Three driver fixes. Two (alua and hpsa) are in hard-to-trigger
attach/detach situations, but the mpt3sas one involves a
polled-to-interrupt switchover that could trigger in any high-IOPS
situation.
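
For background on the mpt3sas one: in irq-poll mode the driver keeps
the IRQ line disabled and drains the reply queue from softirq context;
the bug sits in the switch back to interrupt mode. Below is a
condensed, hypothetical sketch of the poll side. The demo_* names and
demo_process_queue() are illustration stand-ins, not the driver's real
structures or helpers.

#include <linux/interrupt.h>
#include <linux/irq_poll.h>

struct demo_reply_q {
	struct irq_poll irqpoll;	/* polling context */
	unsigned int os_irq;		/* Linux IRQ number for this queue */
};

/* Stand-in for the driver's reply-descriptor processing loop. */
static int demo_process_queue(struct demo_reply_q *rq, int budget);

static int demo_irqpoll(struct irq_poll *iop, int budget)
{
	struct demo_reply_q *rq =
		container_of(iop, struct demo_reply_q, irqpoll);
	int done;

	done = demo_process_queue(rq, budget);	/* drain up to 'budget' */
	if (done < budget) {
		/* Queue looks idle: hand completion back to the IRQ. */
		irq_poll_complete(iop);
		enable_irq(rq->os_irq);
		/*
		 * Replies the firmware posted between the drain above
		 * and enable_irq() raised no interrupt, so drain once
		 * more here; otherwise they sit unprocessed and the
		 * I/Os eventually time out.
		 */
		demo_process_queue(rq, budget);
	}
	return done;
}

That final drain is exactly what the fix adds to _base_irqpoll(); the
full diff is below.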
The patch is available here:

git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git scsi-fixes

The short changelog is:

Hannes Reinecke (1):
      scsi: scsi_dh_alua: Avoid crash during alua_bus_detach()

Keita Suzuki (1):
      scsi: hpsa: Fix memory leak in hpsa_init_one()

Sreekanth Reddy (1):
      scsi: mpt3sas: Fix timeouts observed while reenabling IRQ

And the diffstat:

 drivers/scsi/device_handler/scsi_dh_alua.c | 9 +++++----
 drivers/scsi/hpsa.c                        | 4 +++-
 drivers/scsi/mpt3sas/mpt3sas_base.c        | 7 +++++++
 3 files changed, 15 insertions(+), 5 deletions(-)

With full diff below.

James

---
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index f32da0ca529e..308bda2e9c00 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -658,8 +658,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
 					rcu_read_lock();
 					list_for_each_entry_rcu(h,
 						&tmp_pg->dh_list, node) {
-						/* h->sdev should always be valid */
-						BUG_ON(!h->sdev);
+						if (!h->sdev)
+							continue;
 						h->sdev->access_state = desc[0];
 					}
 					rcu_read_unlock();
@@ -705,7 +705,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
 			pg->expiry = 0;
 			rcu_read_lock();
 			list_for_each_entry_rcu(h, &pg->dh_list, node) {
-				BUG_ON(!h->sdev);
+				if (!h->sdev)
+					continue;
 				h->sdev->access_state =
 					(pg->state & SCSI_ACCESS_STATE_MASK);
 				if (pg->pref)
@@ -1147,7 +1148,6 @@ static void alua_bus_detach(struct scsi_device *sdev)
 	spin_lock(&h->pg_lock);
 	pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
 	rcu_assign_pointer(h->pg, NULL);
-	h->sdev = NULL;
 	spin_unlock(&h->pg_lock);
 	if (pg) {
 		spin_lock_irq(&pg->lock);
@@ -1156,6 +1156,7 @@ static void alua_bus_detach(struct scsi_device *sdev)
 		kref_put(&pg->kref, release_port_group);
 	}
 	sdev->handler_data = NULL;
+	synchronize_rcu();
 	kfree(h);
 }
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 83ce4f11a589..8df70c92911d 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -8855,7 +8855,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* hook into SCSI subsystem */
 	rc = hpsa_scsi_add_host(h);
 	if (rc)
-		goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
+		goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
 
 	/* Monitor the controller for firmware lockups */
 	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
@@ -8870,6 +8870,8 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 				HPSA_EVENT_MONITOR_INTERVAL);
 	return 0;
 
+clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
+	kfree(h->lastlogicals);
 clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
 	hpsa_free_performant_mode(h);
 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
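
The hpsa change is the usual numbered-label unwind idiom: every
resource hpsa_init_one() acquires gets a labelled cleanup step, and a
failure jumps to the label that releases everything acquired so far.
The leak was that hpsa_scsi_add_host() failures jumped to clean7,
which never freed h->lastlogicals. A stripped-down sketch of the idiom
(demo_* names are invented for illustration):

#include <linux/slab.h>

static int demo_register(void);	/* stand-in for a step that can fail late */

static int demo_init(void)
{
	void *a, *b;
	int rc;

	a = kmalloc(64, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kmalloc(64, GFP_KERNEL);	/* plays the role of h->lastlogicals */
	if (!b) {
		rc = -ENOMEM;
		goto clean1;		/* a */
	}

	rc = demo_register();
	if (rc)
		goto clean2;		/* b, then fall through to a */

	return 0;

clean2:	/* b */
	kfree(b);
clean1:	/* a */
	kfree(a);
	return rc;
}

The fix is the textbook extension of this: a new clean8 label frees
the extra allocation and falls through into the existing clean7 chain.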
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 93230cd1982f..e4cc92bc4d94 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1740,6 +1740,13 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget)
 		reply_q->irq_poll_scheduled = false;
 		reply_q->irq_line_enable = true;
 		enable_irq(reply_q->os_irq);
+		/*
+		 * Go for one more round of processing the
+		 * reply descriptor post queue in case the HBA
+		 * firmware has posted some reply descriptors
+		 * while reenabling the IRQ.
+		 */
+		_base_process_reply_queue(reply_q);
 	}
 
 	return num_entries;
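
For completeness, the interrupt side of the same handoff looks roughly
like the sketch below (demo_* stand-ins again; the real logic is
spread across _base_interrupt() and _base_process_reply_queue()). The
hard IRQ handler turns its own line off and defers draining to
irq_poll when the queue looks busy; demo_irqpoll() from the sketch
near the top then turns it back on:

#include <linux/interrupt.h>
#include <linux/irq_poll.h>
#include <linux/limits.h>

/* demo_reply_q and demo_process_queue(): see the sketch near the top. */
static bool demo_queue_is_busy(struct demo_reply_q *rq);	/* stand-in heuristic */

static irqreturn_t demo_isr(int irq, void *data)
{
	struct demo_reply_q *rq = data;

	if (demo_queue_is_busy(rq)) {
		disable_irq_nosync(irq);	/* no further hard IRQs */
		irq_poll_sched(&rq->irqpoll);	/* drain from softirq */
		return IRQ_HANDLED;
	}
	return demo_process_queue(rq, INT_MAX) ? IRQ_HANDLED : IRQ_NONE;
}

Once the poll side re-enables the line, any replies posted in the gap
only get processed because of the extra drain this patch adds.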