Message-ID: <7627ed02d907b6e03de895b38f82f1f3@codeaurora.org>
Date:   Tue, 02 Jul 2019 10:31:29 +0530
From:   amasule@...eaurora.org
To:     Stanimir Varbanov <stanimir.varbanov@...aro.org>
Cc:     linux-media@...r.kernel.org, linux-kernel@...r.kernel.org,
        linux-arm-msm@...r.kernel.org, vgarodia@...eaurora.org
Subject: Re: [PATCH v3 4/4] media: venus: Update core selection

Hi Stan,

On 2019-07-01 19:28, Stanimir Varbanov wrote:
> Hi,
> 
> On 6/25/19 7:27 PM, Aniket Masule wrote:
>> Present core assignment is static. Introduce load balancing
>> across the cores. The load on each core is calculated and the
>> core with the minimum load is assigned to the given instance.
>> 
>> Signed-off-by: Aniket Masule <amasule@...eaurora.org>
>> ---
>>  drivers/media/platform/qcom/venus/helpers.c | 52 +++++++++++++++++++++++++----
>>  drivers/media/platform/qcom/venus/helpers.h |  2 +-
>>  drivers/media/platform/qcom/venus/vdec.c    |  2 +-
>>  drivers/media/platform/qcom/venus/venc.c    |  2 +-
>>  4 files changed, 49 insertions(+), 9 deletions(-)
>> 
>> diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
>> index b79e83a..ef35fd8 100644
>> --- a/drivers/media/platform/qcom/venus/helpers.c
>> +++ b/drivers/media/platform/qcom/venus/helpers.c
>> @@ -498,6 +498,16 @@ static int load_scale_clocks(struct venus_inst *inst)
>>  	return scale_clocks(inst);
>>  }
>> 
>> +int set_core_usage(struct venus_inst *inst, u32 usage)
>> +{
>> +	const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
>> +	struct hfi_videocores_usage_type cu;
>> +
>> +	cu.video_core_enable_mask = usage;
>> +
>> +	return hfi_session_set_property(inst, ptype, &cu);
>> +}
>> +
>>  static void fill_buffer_desc(const struct venus_buffer *buf,
>>  			     struct hfi_buffer_desc *bd, bool response)
>>  {
>> @@ -801,19 +811,49 @@ int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode)
>>  }
>>  EXPORT_SYMBOL_GPL(venus_helper_set_work_mode);
>> 
>> -int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage)
>> +int venus_helper_set_core(struct venus_inst *inst)
>>  {
>> -	const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
>> -	struct hfi_videocores_usage_type cu;
>> +	struct venus_core *core = inst->core;
>> +	u32 min_core_id = 0, core0_load = 0, core1_load = 0;
> 
> the same comment, please use the same counting scheme as for 
> VIDC_CORE_ID_
> 
Yes, I will align this with VIDC_CORE_ID_.
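For example, something along these lines (illustrative sketch only, not
tested; it assumes the existing VIDC_CORE_ID_DEFAULT / VIDC_CORE_ID_1 /
VIDC_CORE_ID_2 defines from core.h and only changes the local
initialization and naming):

	u32 min_core_id = VIDC_CORE_ID_DEFAULT;	/* instead of a bare 0 */
	u32 core1_load, core2_load;	/* names follow VIDC_CORE_ID_1/_2 */

	core1_load = load_per_core(core, VIDC_CORE_ID_1);
	core2_load = load_per_core(core, VIDC_CORE_ID_2);

	min_core_id = core1_load <= core2_load ? VIDC_CORE_ID_1 : VIDC_CORE_ID_2;
	min_load = min(core1_load, core2_load);
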
>> +	unsigned long min_load, max_freq, cur_inst_load;
>> +	u32 cores_max;
>> +	int ret;
>> 
>>  	if (!IS_V4(inst->core))
>>  		return 0;
>> 
>> -	cu.video_core_enable_mask = usage;
>> +	core0_load = load_per_core(core, VIDC_CORE_ID_1);
>> +	core1_load = load_per_core(core, VIDC_CORE_ID_2);
>> 
>> -	return hfi_session_set_property(inst, ptype, &cu);
>> +	min_core_id = core0_load < core1_load ? VIDC_CORE_ID_1 : VIDC_CORE_ID_2;
>> +	min_load = min(core0_load, core1_load);
>> +	cores_max = core_num_max(inst);
>> +
>> +	if (cores_max < VIDC_CORE_ID_2) {
>> +		min_core_id = VIDC_CORE_ID_1;
>> +		min_load = core0_load;
>> +	}
>> +
>> +	cur_inst_load = load_per_instance(inst) *
>> +		inst->clk_data.codec_freq_data->vpp_freq;
>> +	max_freq = core->res->freq_tbl[0].freq;
>> +
>> +	if ((cur_inst_load + min_load)	> max_freq) {
>> +		dev_warn(core->dev, "HW is overloaded, needed: %lu max: %lu\n",
>> +			 cur_inst_load, max_freq);
>> +		return -EINVAL;
>> +	}
>> +
>> +	ret = set_core_usage(inst, min_core_id);
>> +
> 
> please, delete this blank line
> 
>> +	if (ret)
>> +		return ret;
>> +
>> +	inst->clk_data.core_id = min_core_id;
>> +
>> +	return 0;
>>  }
>> -EXPORT_SYMBOL_GPL(venus_helper_set_core_usage);
>> +EXPORT_SYMBOL_GPL(venus_helper_set_core);
>> 
>>  int venus_helper_init_codec_freq_data(struct venus_inst *inst)
>>  {
>> diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h
>> index 2c13245..1034111 100644
>> --- a/drivers/media/platform/qcom/venus/helpers.h
>> +++ b/drivers/media/platform/qcom/venus/helpers.h
>> @@ -42,7 +42,7 @@ int venus_helper_set_output_resolution(struct venus_inst *inst,
>>  				       u32 buftype);
>>  int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode);
>>  int venus_helper_init_codec_freq_data(struct venus_inst *inst);
>> -int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage);
>> +int venus_helper_set_core(struct venus_inst *inst);
>>  int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
>>  			      unsigned int output_bufs,
>>  			      unsigned int output2_bufs);
>> diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
>> index d037f80..620e060 100644
>> --- a/drivers/media/platform/qcom/venus/vdec.c
>> +++ b/drivers/media/platform/qcom/venus/vdec.c
>> @@ -551,7 +551,7 @@ static int vdec_output_conf(struct venus_inst *inst)
>>  	if (ret)
>>  		return ret;
>> 
>> -	ret = venus_helper_set_core_usage(inst, VIDC_CORE_ID_1);
>> +	ret = venus_helper_set_core(inst);
>>  	if (ret)
>>  		return ret;
>> 
>> diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
>> index cdddc82..28e76cc 100644
>> --- a/drivers/media/platform/qcom/venus/venc.c
>> +++ b/drivers/media/platform/qcom/venus/venc.c
>> @@ -660,7 +660,7 @@ static int venc_set_properties(struct venus_inst *inst)
>>  	if (ret)
>>  		return ret;
>> 
>> -	ret = venus_helper_set_core_usage(inst, VIDC_CORE_ID_2);
>> +	ret = venus_helper_set_core(inst);
>>  	if (ret)
>>  		return ret;
>> 
>> 

Regards,
Aniket
