Message-ID: <20210120150142.5049-4-stanley.chu@mediatek.com>
Date: Wed, 20 Jan 2021 23:01:42 +0800
From: Stanley Chu <stanley.chu@...iatek.com>
To: <linux-scsi@...r.kernel.org>, <martin.petersen@...cle.com>,
<avri.altman@....com>, <alim.akhtar@...sung.com>,
<jejb@...ux.ibm.com>
CC: <beanhuo@...ron.com>, <asutoshd@...eaurora.org>,
<cang@...eaurora.org>, <matthias.bgg@...il.com>,
<bvanassche@....org>, <linux-mediatek@...ts.infradead.org>,
<linux-arm-kernel@...ts.infradead.org>,
<linux-kernel@...r.kernel.org>, <kuohong.wang@...iatek.com>,
<peter.wang@...iatek.com>, <chun-hung.wu@...iatek.com>,
<andy.teng@...iatek.com>, <chaotian.jing@...iatek.com>,
<cc.chou@...iatek.com>, <jiajie.hao@...iatek.com>,
<alice.chao@...iatek.com>, Stanley Chu <stanley.chu@...iatek.com>
Subject: [PATCH v3 3/3] scsi: ufs: Cleanup and refactor clk-scaling feature

Manipulate clock scaling related operations only if the host capability
supports the clock scaling feature, to avoid redundant code execution.

Reviewed-by: Can Guo <cang@...eaurora.org>
Signed-off-by: Stanley Chu <stanley.chu@...iatek.com>
---
drivers/scsi/ufs/ufshcd.c | 63 ++++++++++++++++++++-------------------
1 file changed, 32 insertions(+), 31 deletions(-)
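
Note for reviewers (not part of the patch): below is a minimal, self-contained
userspace sketch of the locking pattern the two new helpers encode, compilable
with gcc -pthread. struct fake_hba, clk_scaling_allow(), clk_scaling_suspend()
and the printf() placeholders are illustrative stand-ins for struct ufs_hba,
hba->clk_scaling_lock, hba->clk_scaling.is_allowed/is_enabled and the real
suspend/resume work; they are not driver code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the clk-scaling fields of struct ufs_hba. */
struct fake_hba {
	pthread_rwlock_t scaling_lock;	/* models hba->clk_scaling_lock */
	bool is_allowed;		/* models hba->clk_scaling.is_allowed */
	bool is_enabled;		/* models hba->clk_scaling.is_enabled */
};

/* Mirrors ufshcd_clk_scaling_allow(): flip the flag under the write lock. */
static void clk_scaling_allow(struct fake_hba *hba, bool allow)
{
	pthread_rwlock_wrlock(&hba->scaling_lock);
	hba->is_allowed = allow;
	pthread_rwlock_unlock(&hba->scaling_lock);
}

/*
 * Mirrors ufshcd_clk_scaling_suspend(): quiesce the scaling work before
 * clearing the flag, set the flag before resuming the work, and only
 * touch the work when scaling is currently enabled.
 */
static void clk_scaling_suspend(struct fake_hba *hba, bool suspend)
{
	if (suspend) {
		if (hba->is_enabled)
			printf("suspend clk-scaling work\n");
		clk_scaling_allow(hba, false);
	} else {
		clk_scaling_allow(hba, true);
		if (hba->is_enabled)
			printf("resume clk-scaling work\n");
	}
}

int main(void)
{
	struct fake_hba hba = { .is_allowed = true, .is_enabled = true };

	pthread_rwlock_init(&hba.scaling_lock, NULL);
	clk_scaling_suspend(&hba, true);	/* e.g. on the suspend path */
	clk_scaling_suspend(&hba, false);	/* e.g. on the resume path */
	pthread_rwlock_destroy(&hba.scaling_lock);

	return 0;
}

The ordering follows the diff: the scaling work is stopped before is_allowed
is cleared, and is_allowed is set again before the work is resumed, while
callers gate the whole helper on ufshcd_is_clkscaling_supported(hba).
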
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index f707234580ea..edfac8752d33 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1500,9 +1500,6 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 	unsigned long flags;
 	bool suspend = false;
 
-	if (!ufshcd_is_clkscaling_supported(hba))
-		return;
-
 	cancel_work_sync(&hba->clk_scaling.suspend_work);
 	cancel_work_sync(&hba->clk_scaling.resume_work);
 
@@ -1522,9 +1519,6 @@ static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
 	unsigned long flags;
 	bool resume = false;
 
-	if (!ufshcd_is_clkscaling_supported(hba))
-		return;
-
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (hba->clk_scaling.is_suspended) {
 		resume = true;
@@ -5759,6 +5753,26 @@ static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
 	}
 }
 
+static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
+{
+	down_write(&hba->clk_scaling_lock);
+	hba->clk_scaling.is_allowed = allow;
+	up_write(&hba->clk_scaling_lock);
+}
+
+static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
+{
+	if (suspend) {
+		if (hba->clk_scaling.is_enabled)
+			ufshcd_suspend_clkscaling(hba);
+		ufshcd_clk_scaling_allow(hba, false);
+	} else {
+		ufshcd_clk_scaling_allow(hba, true);
+		if (hba->clk_scaling.is_enabled)
+			ufshcd_resume_clkscaling(hba);
+	}
+}
+
 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 {
 	pm_runtime_get_sync(hba->dev);
@@ -5783,22 +5797,18 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 		ufshcd_vops_resume(hba, pm_op);
 	} else {
 		ufshcd_hold(hba, false);
-		if (hba->clk_scaling.is_enabled)
+		if (ufshcd_is_clkscaling_supported(hba) &&
+		    hba->clk_scaling.is_enabled)
 			ufshcd_suspend_clkscaling(hba);
-		down_write(&hba->clk_scaling_lock);
-		hba->clk_scaling.is_allowed = false;
-		up_write(&hba->clk_scaling_lock);
+		ufshcd_clk_scaling_allow(hba, false);
 	}
 }
 
 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
 {
 	ufshcd_release(hba);
-	down_write(&hba->clk_scaling_lock);
-	hba->clk_scaling.is_allowed = true;
-	up_write(&hba->clk_scaling_lock);
-	if (hba->clk_scaling.is_enabled)
-		ufshcd_resume_clkscaling(hba);
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, false);
 	pm_runtime_put(hba->dev);
 }
 
@@ -8703,12 +8713,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	ufshcd_hold(hba, false);
 	hba->clk_gating.is_suspended = true;
 
-	if (hba->clk_scaling.is_enabled)
-		ufshcd_suspend_clkscaling(hba);
-
-	down_write(&hba->clk_scaling_lock);
-	hba->clk_scaling.is_allowed = false;
-	up_write(&hba->clk_scaling_lock);
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, true);
 
 	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
 	    req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -8828,11 +8834,9 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
 		ufshcd_disable_auto_bkops(hba);
 enable_gating:
-	down_write(&hba->clk_scaling_lock);
-	hba->clk_scaling.is_allowed = true;
-	up_write(&hba->clk_scaling_lock);
-	if (hba->clk_scaling.is_enabled)
-		ufshcd_resume_clkscaling(hba);
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, false);
+
 	hba->clk_gating.is_suspended = false;
 	hba->dev_info.b_rpm_dev_flush_capable = false;
 	ufshcd_release(hba);
@@ -8934,11 +8938,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	hba->clk_gating.is_suspended = false;
 
-	down_write(&hba->clk_scaling_lock);
-	hba->clk_scaling.is_allowed = true;
-	up_write(&hba->clk_scaling_lock);
-	if (hba->clk_scaling.is_enabled)
-		ufshcd_resume_clkscaling(hba);
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, false);
 
 	/* Enable Auto-Hibernate if configured */
 	ufshcd_auto_hibern8_enable(hba);
--
2.18.0