Message-ID: <d2f19c425ae3cfedfaa9a7c9f5bb9e8f@codeaurora.org>
Date: Tue, 27 Aug 2019 10:21:08 +0530
From: amasule@...eaurora.org
To: Stanimir Varbanov <stanimir.varbanov@...aro.org>
Cc: linux-media@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-arm-msm@...r.kernel.org, vgarodia@...eaurora.org
Subject: Re: [PATCH v6 2/4] media: venus: Update clock scaling
Hi Stan,
On 2019-07-29 14:05, Stanimir Varbanov wrote:
> Hi Aniket,
>
> On 7/22/19 12:07 PM, Aniket Masule wrote:
>> The current clock scaling calculations are the same for vpu4 and
>> previous versions. For vpu4, the clock scaling calculations are
>> updated to use cycles/mb. This helps in getting the precise clock
>> rate required.
>>
>> Signed-off-by: Aniket Masule <amasule@...eaurora.org>
>> ---
>> drivers/media/platform/qcom/venus/helpers.c | 91 +++++++++++++++++++++++++++--
>> 1 file changed, 87 insertions(+), 4 deletions(-)
>>
>> diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
>> index 7492373..2c976e4 100644
>> --- a/drivers/media/platform/qcom/venus/helpers.c
>> +++ b/drivers/media/platform/qcom/venus/helpers.c
>> @@ -348,8 +348,9 @@ static u32 load_per_type(struct venus_core *core, u32 session_type)
>> return mbs_per_sec;
>> }
>>
>> -static int load_scale_clocks(struct venus_core *core)
>> +static int scale_clocks(struct venus_inst *inst)
>> {
>> + struct venus_core *core = inst->core;
>> const struct freq_tbl *table = core->res->freq_tbl;
>> unsigned int num_rows = core->res->freq_tbl_size;
>> unsigned long freq = table[0].freq;
>> @@ -398,6 +399,89 @@ static int load_scale_clocks(struct venus_core *core)
>> return ret;
>> }
>>
>> +static unsigned long calculate_vpp_freq(struct venus_inst *inst)
>> +{
>> + unsigned long vpp_freq = 0;
>> + u32 mbs_per_sec;
>> +
>> + mbs_per_sec = load_per_instance(inst);
>> + vpp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vpp_freq;
>> + /* 21 / 20 is overhead factor */
>> + vpp_freq += vpp_freq / 20;
>> +
>> + return vpp_freq;
>> +}
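To illustrate the calculation above with made-up numbers (the real
vpp_freq values come from the per-codec tables, 675 cycles/mb is just
an assumption here):

	1080p frame  : (1920/16) * (1088/16) = 120 * 68 = 8160 mbs
	at 30 fps    : mbs_per_sec = 8160 * 30 = 244800
	vpp_freq     : 244800 * 675 = 165240000 Hz
	with 21/20   : 165240000 + 165240000/20 = 173502000 Hz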
>> +
>> +static int scale_clocks_v4(struct venus_inst *inst)
>> +{
>> + struct venus_core *core = inst->core;
>> + const struct freq_tbl *table = core->res->freq_tbl;
>> + unsigned int num_rows = core->res->freq_tbl_size;
>> + struct clk *clk = core->clks[0];
>> + struct device *dev = core->dev;
>> + unsigned int i;
>> + unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0;
>> + int ret;
>> +
>> + freq = calculate_vpp_freq(inst);
>> +
>> + if (freq > table[0].freq)
>> + dev_warn(dev, "HW is overloaded, needed: %lu max: %lu\n",
>> + freq, table[0].freq);
>> +
>
> ...
>
>> + for (i = 0; i < num_rows; i++) {
>> + if (freq > table[i].freq)
>> + break;
>> + freq = table[i].freq;
>> + }
>
> The above code snippet will always select the biggest rate,
> table[0].freq. In fact, do we need to "normalize" the calculated freq
> to the table of possible clock rates? I think that should be done
> after summing all the needed frequencies for all cores.
>
Will correct this. Normalizing after summing all the needed
frequencies would be more precise.
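Something along these lines, perhaps (just a sketch, untested; it
assumes freq_tbl stays sorted in descending order, as it is today):

	/* Pick the lowest table rate that still covers the summed load. */
	static unsigned long normalize_freq(const struct freq_tbl *table,
					    unsigned int num_rows,
					    unsigned long freq)
	{
		unsigned int i;

		/* Find the first entry that is too slow for the load. */
		for (i = 0; i < num_rows; i++)
			if (freq > table[i].freq)
				break;

		/* i == 0 means even the highest rate is not enough. */
		return i ? table[i - 1].freq : table[0].freq;
	}

scale_clocks_v4() could then sum freq_core1/freq_core2 first and pass
the max of the two through normalize_freq() before clk_set_rate().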
>> +
>> + inst->clk_data.freq = freq;
>> +
>> + mutex_lock(&core->lock);
>> + list_for_each_entry(inst, &core->instances, list) {
>> + if (inst->clk_data.core_id == VIDC_CORE_ID_1) {
>> + freq_core1 += inst->clk_data.freq;
>> + } else if (inst->clk_data.core_id == VIDC_CORE_ID_2) {
>> + freq_core2 += inst->clk_data.freq;
>> + } else if (inst->clk_data.core_id == VIDC_CORE_ID_3) {
>> + freq_core1 += inst->clk_data.freq;
>> + freq_core2 += inst->clk_data.freq;
>> + }
>> + }
>> + mutex_unlock(&core->lock);
>> +
>> + freq = max(freq_core1, freq_core2);
>> +
>> + ret = clk_set_rate(clk, freq);
>> + if (ret)
>> + goto err;
>> +
>> + ret = clk_set_rate(core->core0_clk, freq);
>> + if (ret)
>> + goto err;
>> +
>> + ret = clk_set_rate(core->core1_clk, freq);
>> + if (ret)
>> + goto err;
>
> This is duplicated in both scale_clocks and scale_clocks_v4, and could
> be a common function.
>
I will move this to a common function.
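Roughly like this, I think (sketch only, the helper name is my own):

	static int core_clks_set_rate(struct venus_core *core,
				      unsigned long freq)
	{
		int ret;

		ret = clk_set_rate(core->clks[0], freq);
		if (ret)
			return ret;

		/*
		 * clk_set_rate() treats a NULL clk as a no-op, so this
		 * stays safe on variants without per-core clocks.
		 */
		ret = clk_set_rate(core->core0_clk, freq);
		if (ret)
			return ret;

		return clk_set_rate(core->core1_clk, freq);
	}

Then both scale_clocks() and scale_clocks_v4() can call it and the
dev_err() on failure stays in one place.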
>> +
>> + return 0;
>> +
>> +err:
>> + dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret);
>> + return ret;
>> +}
>> +
>> +static int load_scale_clocks(struct venus_inst *inst)
>> +{
>> + if (IS_V4(inst->core))
>> + return scale_clocks_v4(inst);
>> +
>> + if (inst->state == INST_START)
>> + return 0;
>
> I'm still not sure about this check of the instance state.
>
> If we look into load_per_instance(), it is already doing a similar check:
>
> !(inst->state >= INST_INIT && inst->state < INST_STOP)
>
>
This check was just to verify the instance state before scaling;
it is not a mandatory check. I can remove it.
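For reference, load_per_instance() currently reads roughly like this,
so instances outside the active states already contribute zero load:

	static u32 load_per_instance(struct venus_inst *inst)
	{
		u32 mbs;

		if (!inst ||
		    !(inst->state >= INST_INIT && inst->state < INST_STOP))
			return 0;

		mbs = (ALIGN(inst->width, 16) / 16) *
		      (ALIGN(inst->height, 16) / 16);

		return mbs * inst->fps;
	}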
>> +
>> + return scale_clocks(inst);
>> +}
>> +
>> static void fill_buffer_desc(const struct venus_buffer *buf,
>> struct hfi_buffer_desc *bd, bool response)
>> {
>> @@ -1053,7 +1137,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
>>
>> venus_helper_free_dpb_bufs(inst);
>>
>> - load_scale_clocks(core);
>> + load_scale_clocks(inst);
>> INIT_LIST_HEAD(&inst->registeredbufs);
>> }
>>
>> @@ -1070,7 +1154,6 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
>>
>> int venus_helper_vb2_start_streaming(struct venus_inst *inst)
>> {
>> - struct venus_core *core = inst->core;
>> int ret;
>>
>> ret = intbufs_alloc(inst);
>> @@ -1081,7 +1164,7 @@ int venus_helper_vb2_start_streaming(struct venus_inst *inst)
>> if (ret)
>> goto err_bufs_free;
>>
>> - load_scale_clocks(core);
>> + load_scale_clocks(inst);
>>
>> ret = hfi_session_load_res(inst);
>> if (ret)
>>
Regards,
Aniket