Message-ID: <4340233ae5e105c5617401f84a5ed13c341f5ecf.camel@HansenPartnership.com>
Date: Fri, 27 Nov 2020 13:56:30 -0800
From: James Bottomley <James.Bottomley@...senPartnership.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
	Linus Torvalds <torvalds@...ux-foundation.org>
Cc: linux-scsi <linux-scsi@...r.kernel.org>,
	linux-kernel <linux-kernel@...r.kernel.org>
Subject: [GIT PULL] SCSI fixes for 5.10-rc5
Three small fixes in the UFS driver: two are for power management
issues and the third fixes a slew of problems in the sysfs code.
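
For the curious, the clk scaling fix works by pinning the device active
around the scaling operation: take a runtime PM reference without
triggering a resume, bail out if the device isn't currently active, and
drop the reference afterwards.  A minimal sketch of that pattern
(pm_runtime_* are the real kernel helpers; scale_clocks() and do_scale()
are illustrative stand-ins, not the driver's actual functions):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm_runtime.h>

static int do_scale(struct device *dev);	/* stand-in for the scaling routine */

static int scale_clocks(struct device *dev)
{
	int ret;

	/* take a PM reference but don't wake the device */
	pm_runtime_get_noresume(dev);
	if (!pm_runtime_active(dev)) {
		/* device is suspended: drop the reference, let devfreq retry */
		pm_runtime_put_noidle(dev);
		return -EAGAIN;
	}

	ret = do_scale(dev);	/* device is guaranteed to stay active here */

	/* drop the reference so autosuspend can proceed again */
	pm_runtime_put(dev);
	return ret;
}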
The patch is available here:
git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git scsi-fixes
The short changelog is:
Can Guo (2):
scsi: ufs: Make sure clk scaling happens only when HBA is runtime ACTIVE
scsi: ufs: Fix unexpected values from ufshcd_read_desc_param()
Stanley Chu (1):
scsi: ufs: Fix race between shutdown and runtime resume flow
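
As an aside, the ufshcd_read_desc_param() fix in the diff below boils
down to three defensive steps: allocate the temporary buffer zeroed,
reject a parameter offset that lies beyond the descriptor, and clamp
the copy length to what fits in the buffer.  A rough sketch of that
shape, assuming a hypothetical read_desc() helper in place of the real
query path:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

static int read_desc(struct device *dev, u8 *buf, int len);	/* hypothetical */

static int read_desc_param(struct device *dev, u8 *out,
			   int param_offset, int param_size, int desc_len)
{
	u8 *buf;
	int ret;

	if (param_offset >= desc_len)
		return -EINVAL;		/* offset past the end of the descriptor */

	buf = kzalloc(desc_len, GFP_KERNEL);	/* zeroed: no stale heap bytes */
	if (!buf)
		return -ENOMEM;

	ret = read_desc(dev, buf, desc_len);
	if (!ret) {
		/* clamp so the copy never runs past the buffer */
		if (param_offset + param_size > desc_len)
			param_size = desc_len - param_offset;
		memcpy(out, &buf[param_offset], param_size);
	}

	kfree(buf);
	return ret;
}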
And the diffstat:
drivers/scsi/ufs/ufshcd.c | 37 +++++++++++++++++++++++--------------
1 file changed, 23 insertions(+), 14 deletions(-)
With full diff below.
James
---
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 7a160b86adc6..0c148fcd24de 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1294,8 +1294,15 @@ static int ufshcd_devfreq_target(struct device *dev,
 	}
 	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 
+	pm_runtime_get_noresume(hba->dev);
+	if (!pm_runtime_active(hba->dev)) {
+		pm_runtime_put_noidle(hba->dev);
+		ret = -EAGAIN;
+		goto out;
+	}
 	start = ktime_get();
 	ret = ufshcd_devfreq_scale(hba, scale_up);
+	pm_runtime_put(hba->dev);
 
 	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
 		(scale_up ? "up" : "down"),
@@ -3192,13 +3199,19 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
 	/* Get the length of descriptor */
 	ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
 	if (!buff_len) {
-		dev_err(hba->dev, "%s: Failed to get desc length", __func__);
+		dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
+		return -EINVAL;
+	}
+
+	if (param_offset >= buff_len) {
+		dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
+			__func__, param_offset, desc_id, buff_len);
 		return -EINVAL;
 	}
 
 	/* Check whether we need temp memory */
 	if (param_offset != 0 || param_size < buff_len) {
-		desc_buf = kmalloc(buff_len, GFP_KERNEL);
+		desc_buf = kzalloc(buff_len, GFP_KERNEL);
 		if (!desc_buf)
 			return -ENOMEM;
 	} else {
@@ -3212,14 +3225,14 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
 					desc_buf, &buff_len);
 
 	if (ret) {
-		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
+		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
 			__func__, desc_id, desc_index, param_offset, ret);
 		goto out;
 	}
 
 	/* Sanity check */
 	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
-		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
+		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
 			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
 		ret = -EINVAL;
 		goto out;
@@ -3229,12 +3242,12 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
 	buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
 	ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
 
-	/* Check wherher we will not copy more data, than available */
-	if (is_kmalloc && (param_offset + param_size) > buff_len)
-		param_size = buff_len - param_offset;
-
-	if (is_kmalloc)
+	if (is_kmalloc) {
+		/* Make sure we don't copy more data than available */
+		if (param_offset + param_size > buff_len)
+			param_size = buff_len - param_offset;
 		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+	}
 out:
 	if (is_kmalloc)
 		kfree(desc_buf);
@@ -8900,11 +8913,7 @@ int ufshcd_shutdown(struct ufs_hba *hba)
 	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
 		goto out;
 
-	if (pm_runtime_suspended(hba->dev)) {
-		ret = ufshcd_runtime_resume(hba);
-		if (ret)
-			goto out;
-	}
+	pm_runtime_get_sync(hba->dev);
 
 	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
 out:
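
To spell out that last hunk: the old code resumed the device only if it
observed it already suspended, which left a window where a runtime
resume in flight could race with the power-down sequence.
pm_runtime_get_sync() closes it by synchronously resuming the device if
necessary and holding a PM reference for the rest of shutdown.  The
shape of the pattern, with do_poweroff() as an illustrative stand-in
for the real suspend call:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int do_poweroff(struct device *dev);	/* stand-in for ufshcd_suspend() */

static int shutdown_device(struct device *dev)
{
	/*
	 * Synchronously resume the device (if suspended) and take a PM
	 * reference, so a concurrent runtime resume can't race with the
	 * power-down sequence that follows.
	 */
	pm_runtime_get_sync(dev);

	return do_poweroff(dev);
}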