Message-Id: <20220330150551.2573938-19-cristian.marussi@arm.com>
Date: Wed, 30 Mar 2022 16:05:47 +0100
From: Cristian Marussi <cristian.marussi@....com>
To: linux-kernel@...r.kernel.org, linux-arm-kernel@...ts.infradead.org
Cc: sudeep.holla@....com, james.quinlan@...adcom.com,
Jonathan.Cameron@...wei.com, f.fainelli@...il.com,
etienne.carriere@...aro.org, vincent.guittot@...aro.org,
souvik.chakravarty@....com, cristian.marussi@....com
Subject: [PATCH 18/22] firmware: arm_scmi: Use common iterators in Perf protocol

Make SCMI Perf protocol use the common iterator protocol helpers.

Signed-off-by: Cristian Marussi <cristian.marussi@....com>
---
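Note for reviewers unfamiliar with the new helpers: the three callbacks
registered below plug into a retrieval loop of roughly the shape sketched
here, which iter_response_init()/iter_response_run() execute on the
protocol's behalf. This is only an illustrative sketch reconstructed from
the open-coded loop this patch removes; the struct scmi_iterator aggregate,
its field names and scmi_iterator_run_sketch() are assumptions made for
illustration, not the actual common driver implementation.

/*
 * Illustrative only: rough shape of the loop run by the common iterator
 * helpers.  The scmi_iterator aggregate is hypothetical; the real layout
 * lives in the common SCMI driver code added earlier in this series.
 */
struct scmi_iterator {
	void *msg;			/* TX buffer, sizeof(*msg) from init */
	void *resp;			/* RX buffer */
	struct scmi_xfer *t;
	unsigned int max_resources;	/* MAX_OPPS in this patch */
	void *priv;			/* &ppriv in this patch */
	struct scmi_iterator_state state;
	struct scmi_iterator_ops *ops;
	const struct scmi_protocol_handle *ph;
};

static int scmi_iterator_run_sketch(struct scmi_iterator *i)
{
	int ret = 0;
	struct scmi_iterator_state *st = &i->state;

	do {
		/* Protocol fills the request: domain and start index */
		i->ops->prepare_message(i->msg, st->desc_index, i->priv);

		ret = i->ph->xops->do_xfer(i->ph, i->t);
		if (ret)
			break;

		/* Protocol extracts num_returned/num_remaining */
		ret = i->ops->update_state(st, i->resp, i->priv);
		if (ret)
			break;

		if (st->desc_index + st->num_returned > i->max_resources)
			break;

		/* One process_response() call per returned descriptor */
		for (st->loop_idx = 0; st->loop_idx < st->num_returned;
		     st->loop_idx++) {
			ret = i->ops->process_response(i->ph, i->resp, st,
						       i->priv);
			if (ret)
				break;
		}

		st->desc_index += st->num_returned;
		i->ph->xops->reset_rx_to_maxsz(i->ph, i->t);
		/*
		 * Check both counters, as the removed open-coded loop did,
		 * to avoid spinning forever on buggy firmware.
		 */
	} while (!ret && st->num_returned && st->num_remaining);

	return ret;
}

With the bookkeeping (xfer setup, the bounds check against the resource
maximum, RX resizing between transfers and the buggy-firmware guard)
centralized in the helper, each protocol only supplies the three small
callbacks plus its private data, as the diff below does for Perf.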
drivers/firmware/arm_scmi/perf.c | 117 ++++++++++++++++++-------------
1 file changed, 69 insertions(+), 48 deletions(-)

diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 9e046fd121b9..e1aa0ed67971 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -272,66 +272,87 @@ static int opp_cmp_func(const void *opp1, const void *opp2)
return t1->perf - t2->perf;
}
-static int
-scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
- struct perf_dom_info *perf_dom)
+struct scmi_perf_ipriv {
+ u32 domain;
+ struct perf_dom_info *perf_dom;
+};
+
+static void iter_perf_levels_prepare_message(void *message,
+ unsigned int desc_index,
+ const void *priv)
{
- int ret, cnt;
- u32 tot_opp_cnt = 0;
- u16 num_returned, num_remaining;
- struct scmi_xfer *t;
- struct scmi_opp *opp;
- struct scmi_msg_perf_describe_levels *dom_info;
- struct scmi_msg_resp_perf_describe_levels *level_info;
+ struct scmi_msg_perf_describe_levels *msg = message;
+ const struct scmi_perf_ipriv *p = priv;
- ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_LEVELS,
- sizeof(*dom_info), 0, &t);
- if (ret)
- return ret;
+ msg->domain = cpu_to_le32(p->domain);
+ /* Set the number of OPPs to be skipped/already read */
+ msg->level_index = cpu_to_le32(desc_index);
+}
- dom_info = t->tx.buf;
- level_info = t->rx.buf;
+static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ const struct scmi_msg_resp_perf_describe_levels *r = response;
- do {
- dom_info->domain = cpu_to_le32(domain);
- /* Set the number of OPPs to be skipped/already read */
- dom_info->level_index = cpu_to_le32(tot_opp_cnt);
+ st->num_returned = le16_to_cpu(r->num_returned);
+ st->num_remaining = le16_to_cpu(r->num_remaining);
- ret = ph->xops->do_xfer(ph, t);
- if (ret)
- break;
+ return 0;
+}
- num_returned = le16_to_cpu(level_info->num_returned);
- num_remaining = le16_to_cpu(level_info->num_remaining);
- if (tot_opp_cnt + num_returned > MAX_OPPS) {
- dev_err(ph->dev, "No. of OPPs exceeded MAX_OPPS");
- break;
- }
+static int
+iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *priv)
+{
+ struct scmi_opp *opp;
+ const struct scmi_msg_resp_perf_describe_levels *r = response;
+ struct scmi_perf_ipriv *p = priv;
- opp = &perf_dom->opp[tot_opp_cnt];
- for (cnt = 0; cnt < num_returned; cnt++, opp++) {
- opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
- opp->power = le32_to_cpu(level_info->opp[cnt].power);
- opp->trans_latency_us = le16_to_cpu
- (level_info->opp[cnt].transition_latency_us);
+ opp = &p->perf_dom->opp[st->desc_index + st->loop_idx];
+ opp->perf = le32_to_cpu(r->opp[st->loop_idx].perf_val);
+ opp->power = le32_to_cpu(r->opp[st->loop_idx].power);
+ opp->trans_latency_us =
+ le16_to_cpu(r->opp[st->loop_idx].transition_latency_us);
+ p->perf_dom->opp_count++;
- dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n",
- opp->perf, opp->power, opp->trans_latency_us);
- }
+ dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n",
+ opp->perf, opp->power, opp->trans_latency_us);
- tot_opp_cnt += num_returned;
+ return 0;
+}
- ph->xops->reset_rx_to_maxsz(ph, t);
- /*
- * check for both returned and remaining to avoid infinite
- * loop due to buggy firmware
- */
- } while (num_returned && num_remaining);
+static int
+scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
+ struct perf_dom_info *perf_dom)
+{
+ int ret;
+ void *iter;
+ struct scmi_msg_perf_describe_levels *msg;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_perf_levels_prepare_message,
+ .update_state = iter_perf_levels_update_state,
+ .process_response = iter_perf_levels_process_response,
+ };
+ struct scmi_perf_ipriv ppriv = {
+ .domain = domain,
+ .perf_dom = perf_dom,
+ };
+
+ iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,
+ PERF_DESCRIBE_LEVELS,
+ sizeof(*msg), &ppriv);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ ret = ph->hops->iter_response_run(iter);
+ if (ret)
+ return ret;
- perf_dom->opp_count = tot_opp_cnt;
- ph->xops->xfer_put(ph, t);
+ if (perf_dom->opp_count)
+ sort(perf_dom->opp, perf_dom->opp_count,
+ sizeof(struct scmi_opp), opp_cmp_func, NULL);
- sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
return ret;
}
--
2.32.0