Message-ID: <d8fb8806-df3f-dd30-9d40-79667cf5cc37@linaro.org>
Date: Mon, 29 Jul 2019 12:46:38 +0300
From: Stanimir Varbanov <stanimir.varbanov@...aro.org>
To: Aniket Masule <amasule@...eaurora.org>, linux-media@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, linux-arm-msm@...r.kernel.org,
vgarodia@...eaurora.org
Subject: Re: [PATCH v6 4/4] media: venus: Update core selection
Hi,
On 7/22/19 12:07 PM, Aniket Masule wrote:
> Present core assignment is static. Introduced load balancing
> across the cores. Load on each core is calculated and the core
> with minimum load is assigned to the given instance.
>
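Just to check that I follow the policy: the per-instance load is
macroblocks/sec multiplied by the per-codec vpp cycles-per-macroblock,
the per-core load is the sum over the instances already pinned to that
core, and the new instance goes to the core with the smaller sum (and
is rejected when even that core would be pushed past the top freq_tbl
entry). A standalone illustration with made-up numbers, not taken from
the driver or any real freq_tbl:

	/* Illustration only -- all numbers below are invented. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long mbs_per_frame = (1920 / 16) * (1088 / 16); /* 8160 MBs for 1080p */
		unsigned long fps = 30;
		unsigned long vpp_freq = 10;          /* hypothetical cycles per macroblock */
		unsigned long max_freq = 533000000UL; /* hypothetical freq_tbl[0].freq, in Hz */

		/* load of the new instance, in cycles per second */
		unsigned long cur_inst_load = mbs_per_frame * fps * vpp_freq;
		/* pretend one identical session already runs on core2, core1 is idle */
		unsigned long core1_load = 0, core2_load = cur_inst_load;
		unsigned long min_load = core1_load < core2_load ? core1_load : core2_load;

		/* new instance lands on the least loaded core, unless even that
		 * core would exceed the highest available core clock */
		printf("needed %lu of %lu Hz -> %s core%d\n",
		       cur_inst_load + min_load, max_freq,
		       cur_inst_load + min_load > max_freq ? "reject" : "use",
		       core1_load < core2_load ? 1 : 2);
		return 0;
	}
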
> Signed-off-by: Aniket Masule <amasule@...eaurora.org>
> ---
> drivers/media/platform/qcom/venus/helpers.c | 69 +++++++++++++++++++++++---
> drivers/media/platform/qcom/venus/helpers.h | 2 +-
> drivers/media/platform/qcom/venus/hfi_helper.h | 1 +
> drivers/media/platform/qcom/venus/hfi_parser.h | 5 ++
> drivers/media/platform/qcom/venus/vdec.c | 2 +-
> drivers/media/platform/qcom/venus/venc.c | 2 +-
> 6 files changed, 72 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
> index edf403d..3b6cbbf 100644
> --- a/drivers/media/platform/qcom/venus/helpers.c
> +++ b/drivers/media/platform/qcom/venus/helpers.c
> @@ -26,6 +26,7 @@
> #include "helpers.h"
> #include "hfi_helper.h"
> #include "hfi_venus_io.h"
> +#include "hfi_parser.h"
>
> struct intbuf {
> struct list_head list;
> @@ -331,6 +332,24 @@ static u32 load_per_instance(struct venus_inst *inst)
> return mbs * inst->fps;
> }
>
> +static u32 load_per_core(struct venus_core *core, u32 core_id)
> +{
> + struct venus_inst *inst = NULL;
> + u32 mbs_per_sec = 0, load = 0;
> +
> + mutex_lock(&core->lock);
> + list_for_each_entry(inst, &core->instances, list) {
> + if (inst->clk_data.core_id != core_id)
> + continue;
> +
> + mbs_per_sec = load_per_instance(inst);
> + load += mbs_per_sec * inst->clk_data.codec_freq_data->vpp_freq;
> + }
> + mutex_unlock(&core->lock);
> +
> + return load;
> +}
> +
> static u32 load_per_type(struct venus_core *core, u32 session_type)
> {
> struct venus_inst *inst = NULL;
> @@ -505,6 +524,16 @@ static int load_scale_clocks(struct venus_inst *inst)
> return scale_clocks(inst);
> }
>
> +int set_core_usage(struct venus_inst *inst, u32 usage)
> +{
> + const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
> + struct hfi_videocores_usage_type cu;
> +
> + cu.video_core_enable_mask = usage;
> +
> + return hfi_session_set_property(inst, ptype, &cu);
> +}
> +
> static void fill_buffer_desc(const struct venus_buffer *buf,
> struct hfi_buffer_desc *bd, bool response)
> {
> @@ -808,19 +837,47 @@ int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode)
> }
> EXPORT_SYMBOL_GPL(venus_helper_set_work_mode);
>
> -int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage)
> +int venus_helper_set_core(struct venus_inst *inst)
> {
> - const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
> - struct hfi_videocores_usage_type cu;
> + struct venus_core *core = inst->core;
> + u32 min_core_id = 0, core1_load = 0, core2_load = 0;
> + unsigned long min_load, max_freq, cur_inst_load;
> + u32 cores_max;
> + int ret;
>
> if (!IS_V4(inst->core))
> return 0;
>
> - cu.video_core_enable_mask = usage;
> + core1_load = load_per_core(core, VIDC_CORE_ID_1);
> + core2_load = load_per_core(core, VIDC_CORE_ID_2);
> + min_core_id = core1_load < core2_load ? VIDC_CORE_ID_1 : VIDC_CORE_ID_2;
> + min_load = min(core1_load, core2_load);
> + cores_max = core_num_max(inst);
>
> - return hfi_session_set_property(inst, ptype, &cu);
> + if (cores_max < VIDC_CORE_ID_2) {
> + min_core_id = VIDC_CORE_ID_1;
> + min_load = core1_load;
> + }
> +
> + cur_inst_load = load_per_instance(inst) *
> + inst->clk_data.codec_freq_data->vpp_freq;
> + max_freq = core->res->freq_tbl[0].freq;
> +
> + if ((cur_inst_load + min_load) > max_freq) {
> + dev_warn(core->dev, "HW is overloaded, needed: %lu max: %lu\n",
> + cur_inst_load, max_freq);
> + return -EINVAL;
> + }
> +
> + ret = set_core_usage(inst, min_core_id);
We have a problem here. Let's assume we have only one running
decoder session and the code above decides that it should be handled
by core2. At present, core2 clocks are enabled only when there is an
encoder session (see the DT subnodes), so we would end up selecting
core2 without ever enabling the core2 clocks and power domain.
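
One possible direction (only a rough, untested sketch on my side; it
assumes the existing load_per_type() helper and the session type define
can be reused here) would be to fall back to core1 whenever no encoder
session is active, so core2 is only picked while its clocks and power
domain are known to be up:

	/*
	 * Sketch only, not a tested fix: avoid core2 unless an encoder
	 * session is running, since core2 clocks/power domain are brought
	 * up together with the encoder per the DT subnodes. Reusing
	 * load_per_type() as an "is any encoder active" check is an
	 * approximation (an idle encoder reports zero load).
	 */
	if (min_core_id == VIDC_CORE_ID_2 &&
	    !load_per_type(core, VIDC_SESSION_TYPE_ENC)) {
		min_core_id = VIDC_CORE_ID_1;
		min_load = core1_load;
	}

A more complete fix would probably tie the core2 clocks/power domain to
the selected core rather than to the session type; the sketch above only
avoids the immediate problem for a lone decoder.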
> + if (ret)
> + return ret;
> +
> + inst->clk_data.core_id = min_core_id;
> +
> + return 0;
> }
> -EXPORT_SYMBOL_GPL(venus_helper_set_core_usage);
> +EXPORT_SYMBOL_GPL(venus_helper_set_core);
>
--
regards,
Stan