Date:   Mon, 24 Jan 2022 19:41:37 +0100
From:   Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To:     linux-kernel@...r.kernel.org
Cc:     Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        stable@...r.kernel.org,
        Mansur Alisha Shaik <mansur@...eaurora.org>,
        Stanimir Varbanov <stanimir.varbanov@...aro.org>,
        Mauro Carvalho Chehab <mchehab+huawei@...nel.org>,
        Sasha Levin <sashal@...nel.org>
Subject: [PATCH 5.10 332/563] media: venus: avoid calling core_clk_setrate() concurrently during concurrent video sessions

From: Mansur Alisha Shaik <mansur@...eaurora.org>

[ Upstream commit 91f2b7d269e5c885c38c7ffa261f5276bd42f907 ]

In the existing implementation, core_clk_setrate() is called concurrently
from concurrent video sessions: before a previous call to core_clk_setrate()
returns, a new call is invoked from another video session running in
parallel. As a result, the most recently calculated frequency (higher or
lower) is set instead of the frequency actually required for that video
session. It also leads to the stability crashes shown below. These resources
are specific to the video core, so keeping them under the core lock ensures
that the load is estimated across all running video sessions and
core_clk_setrate() is called once for the video core.
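
For readers unfamiliar with the locking pattern, a minimal userspace
analogue is sketched below (hypothetical names; pthread mutexes stand in
for the kernel mutex API, and a plain field write stands in for
clk_set_rate()). The point it illustrates is that the per-session loads
are summed and the shared clock rate is written while one core lock is
held, so concurrent sessions can no longer interleave their set-rate
calls:

    /*
     * Minimal sketch, not the driver code: one lock covers both the
     * load estimation across sessions and the single set-rate call.
     */
    #include <pthread.h>
    #include <stdio.h>

    struct session {
            unsigned int load;
            struct session *next;
    };

    struct core {
            pthread_mutex_t lock;
            struct session *sessions;
            unsigned long clk_rate;
    };

    /* Sum the load of all running sessions; caller must hold core->lock. */
    static unsigned long total_load(struct core *core)
    {
            unsigned long sum = 0;
            struct session *s;

            for (s = core->sessions; s; s = s->next)
                    sum += s->load;

            return sum;
    }

    /* Recalculate and apply the core clock rate for all sessions at once. */
    static void load_scale(struct core *core)
    {
            pthread_mutex_lock(&core->lock);
            core->clk_rate = total_load(core) * 1000UL; /* stand-in for clk_set_rate() */
            printf("set core clk to %lu\n", core->clk_rate);
            pthread_mutex_unlock(&core->lock);
    }

    int main(void)
    {
            struct session a = { .load = 100, .next = NULL };
            struct session b = { .load = 200, .next = &a };
            struct core core = {
                    .lock = PTHREAD_MUTEX_INITIALIZER,
                    .sessions = &b,
                    .clk_rate = 0,
            };

            load_scale(&core);
            return 0;
    }

In the actual patch the same effect is achieved by taking core->lock in
load_scale_v1() and load_scale_v4() and dropping the inner locking from
load_per_type() and load_scale_bw(), whose callers now already hold the
lock.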

Crash logs:

[    1.900089] WARNING: CPU: 4 PID: 1 at drivers/opp/debugfs.c:33 opp_debug_remove_one+0x2c/0x48
[    1.908493] Modules linked in:
[    1.911524] CPU: 4 PID: 1 Comm: swapper/0 Not tainted 5.10.67 #35 f8edb8c30cf2dd6838495dd9ef9be47af7f5f60c
[    1.921036] Hardware name: Qualcomm Technologies, Inc. sc7280 IDP SKU2 platform (DT)
[    1.928673] pstate: 60800009 (nZCv daif -PAN +UAO -TCO BTYPE=--)
[    1.934608] pc : opp_debug_remove_one+0x2c/0x48
[    1.939080] lr : opp_debug_remove_one+0x2c/0x48
[    1.943560] sp : ffffffc011d7b7f0
[    1.946836] pmr_save: 000000e0
[    1.949854] x29: ffffffc011d7b7f0 x28: ffffffc010733bbc
[    1.955104] x27: ffffffc010733ba8 x26: ffffff8083cedd00
[    1.960355] x25: 0000000000000001 x24: 0000000000000000
[    1.965603] x23: ffffff8083cc2878 x22: ffffff8083ceb900
[    1.970852] x21: ffffff8083ceb910 x20: ffffff8083cc2800
[    1.976101] x19: ffffff8083ceb900 x18: 00000000ffff0a10
[    1.981352] x17: ffffff80837a5620 x16: 00000000000000ec
[    1.986601] x15: ffffffc010519ad4 x14: 0000000000000003
[    1.991849] x13: 0000000000000004 x12: 0000000000000001
[    1.997100] x11: c0000000ffffdfff x10: 00000000ffffffff
[    2.002348] x9 : d2627c580300dc00 x8 : d2627c580300dc00
[    2.007596] x7 : 0720072007200720 x6 : ffffff80802ecf00
[    2.012845] x5 : 0000000000190004 x4 : 0000000000000000
[    2.018094] x3 : ffffffc011d7b478 x2 : ffffffc011d7b480
[    2.023343] x1 : 00000000ffffdfff x0 : 0000000000000017
[    2.028594] Call trace:
[    2.031022]  opp_debug_remove_one+0x2c/0x48
[    2.035160]  dev_pm_opp_put+0x94/0xb0
[    2.038780]  _opp_remove_all+0x7c/0xc8
[    2.042486]  _opp_remove_all_static+0x54/0x7c
[    2.046796]  dev_pm_opp_remove_table+0x74/0x98
[    2.051183]  devm_pm_opp_of_table_release+0x18/0x24
[    2.056001]  devm_action_release+0x1c/0x28
[    2.060053]  release_nodes+0x23c/0x2b8
[    2.063760]  devres_release_group+0xcc/0xd0
[    2.067900]  component_bind+0xac/0x168
[    2.071608]  component_bind_all+0x98/0x124
[    2.075664]  msm_drm_bind+0x1e8/0x678
[    2.079287]  try_to_bring_up_master+0x60/0x134
[    2.083674]  component_master_add_with_match+0xd8/0x120
[    2.088834]  msm_pdev_probe+0x20c/0x2a0
[    2.092629]  platform_drv_probe+0x9c/0xbc
[    2.096598]  really_probe+0x11c/0x46c
[    2.100217]  driver_probe_device+0x8c/0xf0
[    2.104270]  device_driver_attach+0x54/0x78
[    2.108407]  __driver_attach+0x48/0x148
[    2.112201]  bus_for_each_dev+0x88/0xd4
[    2.115998]  driver_attach+0x2c/0x38
[    2.119534]  bus_add_driver+0x10c/0x200
[    2.123330]  driver_register+0x6c/0x104
[    2.127122]  __platform_driver_register+0x4c/0x58
[    2.131767]  msm_drm_register+0x6c/0x70
[    2.135560]  do_one_initcall+0x64/0x23c
[    2.139357]  do_initcall_level+0xac/0x15c
[    2.143321]  do_initcalls+0x5c/0x9c
[    2.146778]  do_basic_setup+0x2c/0x38
[    2.150401]  kernel_init_freeable+0xf8/0x15c
[    2.154622]  kernel_init+0x1c/0x11c
[    2.158079]  ret_from_fork+0x10/0x30
[    2.161615] ---[ end trace a2cc45a0f784b212 ]---

[    2.166272] Removing OPP: 300000000

Signed-off-by: Mansur Alisha Shaik <mansur@...eaurora.org>
Signed-off-by: Stanimir Varbanov <stanimir.varbanov@...aro.org>
Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@...nel.org>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
 .../media/platform/qcom/venus/pm_helpers.c    | 28 +++++++++----------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index 63095d70f8d82..710f9a2b132b0 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -147,14 +147,12 @@ static u32 load_per_type(struct venus_core *core, u32 session_type)
 	struct venus_inst *inst = NULL;
 	u32 mbs_per_sec = 0;
 
-	mutex_lock(&core->lock);
 	list_for_each_entry(inst, &core->instances, list) {
 		if (inst->session_type != session_type)
 			continue;
 
 		mbs_per_sec += load_per_instance(inst);
 	}
-	mutex_unlock(&core->lock);
 
 	return mbs_per_sec;
 }
@@ -203,14 +201,12 @@ static int load_scale_bw(struct venus_core *core)
 	struct venus_inst *inst = NULL;
 	u32 mbs_per_sec, avg, peak, total_avg = 0, total_peak = 0;
 
-	mutex_lock(&core->lock);
 	list_for_each_entry(inst, &core->instances, list) {
 		mbs_per_sec = load_per_instance(inst);
 		mbs_to_bw(inst, mbs_per_sec, &avg, &peak);
 		total_avg += avg;
 		total_peak += peak;
 	}
-	mutex_unlock(&core->lock);
 
 	/*
 	 * keep minimum bandwidth vote for "video-mem" path,
@@ -237,8 +233,9 @@ static int load_scale_v1(struct venus_inst *inst)
 	struct device *dev = core->dev;
 	u32 mbs_per_sec;
 	unsigned int i;
-	int ret;
+	int ret = 0;
 
+	mutex_lock(&core->lock);
 	mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) +
 		      load_per_type(core, VIDC_SESSION_TYPE_DEC);
 
@@ -263,17 +260,19 @@ set_freq:
 	if (ret) {
 		dev_err(dev, "failed to set clock rate %lu (%d)\n",
 			freq, ret);
-		return ret;
+		goto exit;
 	}
 
 	ret = load_scale_bw(core);
 	if (ret) {
 		dev_err(dev, "failed to set bandwidth (%d)\n",
 			ret);
-		return ret;
+		goto exit;
 	}
 
-	return 0;
+exit:
+	mutex_unlock(&core->lock);
+	return ret;
 }
 
 static int core_get_v1(struct venus_core *core)
@@ -960,13 +959,13 @@ static int load_scale_v4(struct venus_inst *inst)
 	struct device *dev = core->dev;
 	unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0;
 	unsigned long filled_len = 0;
-	int i, ret;
+	int i, ret = 0;
 
 	for (i = 0; i < inst->num_input_bufs; i++)
 		filled_len = max(filled_len, inst->payloads[i]);
 
 	if (inst->session_type == VIDC_SESSION_TYPE_DEC && !filled_len)
-		return 0;
+		return ret;
 
 	freq = calculate_inst_freq(inst, filled_len);
 	inst->clk_data.freq = freq;
@@ -982,7 +981,6 @@ static int load_scale_v4(struct venus_inst *inst)
 			freq_core2 += inst->clk_data.freq;
 		}
 	}
-	mutex_unlock(&core->lock);
 
 	freq = max(freq_core1, freq_core2);
 
@@ -1006,17 +1004,19 @@ set_freq:
 	if (ret) {
 		dev_err(dev, "failed to set clock rate %lu (%d)\n",
 			freq, ret);
-		return ret;
+		goto exit;
 	}
 
 	ret = load_scale_bw(core);
 	if (ret) {
 		dev_err(dev, "failed to set bandwidth (%d)\n",
 			ret);
-		return ret;
+		goto exit;
 	}
 
-	return 0;
+exit:
+	mutex_unlock(&core->lock);
+	return ret;
 }
 
 static const struct venus_pm_ops pm_ops_v4 = {
-- 
2.34.1


