Message-Id: <20200720152826.195622807@linuxfoundation.org>
Date: Mon, 20 Jul 2020 17:36:48 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Maulik Shah <mkshah@...eaurora.org>,
Srinivas Rao L <lsrao@...eaurora.org>,
Evan Green <evgreen@...omium.org>,
Douglas Anderson <dianders@...omium.org>,
Stephen Boyd <swboyd@...omium.org>,
Bjorn Andersson <bjorn.andersson@...aro.org>
Subject: [PATCH 5.4 126/215] soc: qcom: rpmh: Update dirty flag only when data changes
From: Maulik Shah <mkshah@...eaurora.org>
commit bb7000677a1b287206c8d4327c62442fa3050a8f upstream.
Currently the rpmh ctrlr dirty flag is set in all cases, regardless of
whether the data actually changed. Update the dirty flag only when the
data changes to a new value. Always mark the controller dirty when data
in the batch cache is updated, since rpmh_flush() may get invoked from
any CPU rather than only the last CPU going to low power mode.
Also move the dirty flag updates inside the cache_lock, and remove the
unnecessary INIT_LIST_HEAD() call and the default case from the switch.
Fixes: 600513dfeef3 ("drivers: qcom: rpmh: cache sleep/wake state requests")
Signed-off-by: Maulik Shah <mkshah@...eaurora.org>
Reviewed-by: Srinivas Rao L <lsrao@...eaurora.org>
Reviewed-by: Evan Green <evgreen@...omium.org>
Reviewed-by: Douglas Anderson <dianders@...omium.org>
Reviewed-by: Stephen Boyd <swboyd@...omium.org>
Link: https://lore.kernel.org/r/1586703004-13674-3-git-send-email-mkshah@codeaurora.org
Signed-off-by: Bjorn Andersson <bjorn.andersson@...aro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
drivers/soc/qcom/rpmh.c | 19 +++++++++++--------
1 file changed, 11 insertions(+), 8 deletions(-)
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -119,6 +119,7 @@ static struct cache_req *cache_rpm_reque
{
struct cache_req *req;
unsigned long flags;
+ u32 old_sleep_val, old_wake_val;
spin_lock_irqsave(&ctrlr->cache_lock, flags);
req = __find_req(ctrlr, cmd->addr);
@@ -133,26 +134,27 @@ static struct cache_req *cache_rpm_reque
req->addr = cmd->addr;
req->sleep_val = req->wake_val = UINT_MAX;
- INIT_LIST_HEAD(&req->list);
list_add_tail(&req->list, &ctrlr->cache);
existing:
+ old_sleep_val = req->sleep_val;
+ old_wake_val = req->wake_val;
+
switch (state) {
case RPMH_ACTIVE_ONLY_STATE:
- if (req->sleep_val != UINT_MAX)
- req->wake_val = cmd->data;
- break;
case RPMH_WAKE_ONLY_STATE:
req->wake_val = cmd->data;
break;
case RPMH_SLEEP_STATE:
req->sleep_val = cmd->data;
break;
- default:
- break;
}
- ctrlr->dirty = true;
+ ctrlr->dirty = (req->sleep_val != old_sleep_val ||
+ req->wake_val != old_wake_val) &&
+ req->sleep_val != UINT_MAX &&
+ req->wake_val != UINT_MAX;
+
unlock:
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
@@ -287,6 +289,7 @@ static void cache_batch(struct rpmh_ctrl
spin_lock_irqsave(&ctrlr->cache_lock, flags);
list_add_tail(&req->list, &ctrlr->batch_cache);
+ ctrlr->dirty = true;
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}
@@ -323,6 +326,7 @@ static void invalidate_batch(struct rpmh
list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
kfree(req);
INIT_LIST_HEAD(&ctrlr->batch_cache);
+ ctrlr->dirty = true;
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}
@@ -509,7 +513,6 @@ int rpmh_invalidate(const struct device
int ret;
invalidate_batch(ctrlr);
- ctrlr->dirty = true;
do {
ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
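For illustration only, here is a minimal standalone sketch (a small
userspace C program, not the driver code) of the dirty-flag condition the
hunk above introduces. The struct and the is_dirty() helper are hypothetical
and only mirror the driver's cache_req fields; in the driver, sleep_val and
wake_val start out as UINT_MAX, meaning "not yet set", so the controller is
marked dirty only once both values are populated and at least one of them
actually changed.

/* Hypothetical illustration of the dirty-flag rule; not part of the driver. */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct cache_req { unsigned int sleep_val, wake_val; };

static bool is_dirty(const struct cache_req *req,
		     unsigned int old_sleep_val, unsigned int old_wake_val)
{
	/* Dirty only if something changed and both values are populated. */
	return (req->sleep_val != old_sleep_val ||
		req->wake_val != old_wake_val) &&
	       req->sleep_val != UINT_MAX &&
	       req->wake_val != UINT_MAX;
}

int main(void)
{
	/* UINT_MAX plays the role of "value not set yet", as in the driver. */
	struct cache_req req = { .sleep_val = UINT_MAX, .wake_val = UINT_MAX };

	/* First write sets only wake_val: not dirty, sleep_val is still unset. */
	unsigned int old_s = req.sleep_val, old_w = req.wake_val;
	req.wake_val = 0x10;
	printf("after wake write:  dirty=%d\n", is_dirty(&req, old_s, old_w));

	/* Second write sets sleep_val: both populated and changed -> dirty. */
	old_s = req.sleep_val;
	old_w = req.wake_val;
	req.sleep_val = 0x20;
	printf("after sleep write: dirty=%d\n", is_dirty(&req, old_s, old_w));

	return 0;
}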