Message-Id: <20230610-topic-bwmon_opp-v2-1-0d25c1ce7dca@linaro.org>
Date:   Wed, 14 Jun 2023 15:39:59 +0200
From:   Konrad Dybcio <konrad.dybcio@...aro.org>
To:     Krzysztof Kozlowski <krzysztof.kozlowski@...aro.org>,
        Andy Gross <agross@...nel.org>,
        Bjorn Andersson <andersson@...nel.org>
Cc:     Marijn Suijten <marijn.suijten@...ainline.org>,
        linux-arm-msm@...r.kernel.org, linux-kernel@...r.kernel.org,
        Konrad Dybcio <konrad.dybcio@...aro.org>
Subject: [PATCH RFC v2] soc: qcom: icc-bwmon: Set default thresholds
 dynamically

Currently we use predefined initial threshold values. This works, but
does not scale well as more and more SoCs gain bwmon support, since the
necessary kickoff values may differ between platforms due to memory type
and/or controller setup.
All of the data we need is already provided in the device tree anyway.
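
For reference, the lowest bandwidth in the DT-provided OPP table can be
read back at probe/start time roughly like this (a minimal sketch with a
hypothetical helper name; the actual call site is in the diff below):

#include <linux/err.h>
#include <linux/pm_opp.h>

/* Sketch: fetch the lowest bandwidth (kBps) from the OPP table that
 * the device tree provides for this bwmon instance.
 */
static u32 bwmon_lowest_bw_kbps(struct device *dev)
{
	struct dev_pm_opp *opp;
	u32 bw_kbps = 0;

	/* Searching upwards from 0 kBps returns the lowest bandwidth OPP. */
	opp = dev_pm_opp_find_bw_ceil(dev, &bw_kbps, 0);
	if (IS_ERR(opp))
		return 0;

	dev_pm_opp_put(opp);
	return bw_kbps;
}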

Change the thresholds to:
* low = 0 (as we've been doing up until now)
* med = high = BW_MIN

Throughput going below the med threshold nudges bwmon into signaling
that we should slow down (e.g. if we inherited an overly high bandwidth
vote from the bootloader).

Throughput going above the high threshold nudges bwmon into signaling
that we should speed up so as not to choke the bus traffic due to
insufficient transfer rates.
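
Spelled out as a conceptual sketch (not the driver's actual interrupt
code, just the decision rule with made-up names):

/* Direction of the bandwidth vote to make after a sample window. */
enum bwmon_vote { BWMON_VOTE_KEEP, BWMON_VOTE_UP, BWMON_VOTE_DOWN };

static enum bwmon_vote bwmon_pick_direction(u32 meas_kbps, u32 med_kbps,
					    u32 high_kbps)
{
	if (meas_kbps > high_kbps)
		return BWMON_VOTE_UP;	/* traffic outgrew the current level */
	if (meas_kbps < med_kbps)
		return BWMON_VOTE_DOWN;	/* traffic fits well below it */
	return BWMON_VOTE_KEEP;
}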

BW_MIN (the bandwidth of the lowest OPP) is a perfect initial value for
both of these cases - if we go above it (and there's a 99.99% chance that
happens at boot time), we should definitely make the memory go faster,
whereas if we go below it, we should slow down, no matter what performance
state we were at before (any available OPP is >= BW_MIN anyway).

This only changes the values programmed at probe time; the high and med
thresholds are then updated in the interrupt handler, again based on the
OPP table from DT.
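
For context, the interrupt path re-centers those thresholds on the OPPs
bracketing the newly chosen bandwidth, roughly along these lines (a
simplified sketch; the helper and its parameters are illustrative, not
the driver's exact code):

static void bwmon_recenter_thresholds(struct icc_bwmon *bwmon,
				      u32 target_kbps, u32 min_kbps,
				      u32 max_kbps)
{
	struct dev_pm_opp *opp;
	u32 up_kbps = target_kbps + 1;
	u32 down_kbps = target_kbps;

	/* The OPP just above the target becomes the new high threshold. */
	opp = dev_pm_opp_find_bw_ceil(bwmon->dev, &up_kbps, 0);
	if (IS_ERR(opp))
		up_kbps = max_kbps;	/* already at the top of the table */
	else
		dev_pm_opp_put(opp);

	/* The OPP at or below the target becomes the new med threshold. */
	opp = dev_pm_opp_find_bw_floor(bwmon->dev, &down_kbps, 0);
	if (IS_ERR(opp))
		down_kbps = min_kbps;	/* already at the bottom */
	else
		dev_pm_opp_put(opp);

	bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_HIGH], up_kbps);
	bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_MED], down_kbps);
}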

Signed-off-by: Konrad Dybcio <konrad.dybcio@...aro.org>
---
Changes in v2:
- Drop now-merged dependency
- Drop div4 for the med value
- Better explain the reasoning in the commit message
- Link to v1: https://lore.kernel.org/r/20230610-topic-bwmon_opp-v1-1-65125d7493fc@linaro.org
---
 drivers/soc/qcom/icc-bwmon.c | 28 +++++++---------------------
 1 file changed, 7 insertions(+), 21 deletions(-)

diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c
index 40068a285913..8daf0eb03279 100644
--- a/drivers/soc/qcom/icc-bwmon.c
+++ b/drivers/soc/qcom/icc-bwmon.c
@@ -165,9 +165,6 @@ enum bwmon_fields {
 struct icc_bwmon_data {
 	unsigned int sample_ms;
 	unsigned int count_unit_kb; /* kbytes */
-	unsigned int default_highbw_kbps;
-	unsigned int default_medbw_kbps;
-	unsigned int default_lowbw_kbps;
 	u8 zone1_thres_count;
 	u8 zone3_thres_count;
 	unsigned int quirks;
@@ -564,20 +561,21 @@ static void bwmon_set_threshold(struct icc_bwmon *bwmon,
 static void bwmon_start(struct icc_bwmon *bwmon)
 {
 	const struct icc_bwmon_data *data = bwmon->data;
+	u32 bw_low = 0;
 	int window;
 
+	/* No need to check for errors, as this must have succeeded before. */
+	dev_pm_opp_put(dev_pm_opp_find_bw_ceil(bwmon->dev, &bw_low, 0));
+
 	bwmon_clear_counters(bwmon, true);
 
 	window = mult_frac(bwmon->data->sample_ms, HW_TIMER_HZ, MSEC_PER_SEC);
 	/* Maximum sampling window: 0xffffff for v4 and 0xfffff for v5 */
 	regmap_field_write(bwmon->regs[F_SAMPLE_WINDOW], window);
 
-	bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_HIGH],
-			    data->default_highbw_kbps);
-	bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_MED],
-			    data->default_medbw_kbps);
-	bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_LOW],
-			    data->default_lowbw_kbps);
+	bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_HIGH], bw_low);
+	bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_MED], bw_low);
+	bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_LOW], 0);
 
 	regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE0],
 			   BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT);
@@ -807,9 +805,6 @@ static int bwmon_remove(struct platform_device *pdev)
 static const struct icc_bwmon_data msm8998_bwmon_data = {
 	.sample_ms = 4,
 	.count_unit_kb = 1024,
-	.default_highbw_kbps = 4800 * 1024, /* 4.8 GBps */
-	.default_medbw_kbps = 512 * 1024, /* 512 MBps */
-	.default_lowbw_kbps = 0,
 	.zone1_thres_count = 16,
 	.zone3_thres_count = 1,
 	.quirks = BWMON_HAS_GLOBAL_IRQ,
@@ -822,9 +817,6 @@ static const struct icc_bwmon_data msm8998_bwmon_data = {
 static const struct icc_bwmon_data sdm845_cpu_bwmon_data = {
 	.sample_ms = 4,
 	.count_unit_kb = 64,
-	.default_highbw_kbps = 4800 * 1024, /* 4.8 GBps */
-	.default_medbw_kbps = 512 * 1024, /* 512 MBps */
-	.default_lowbw_kbps = 0,
 	.zone1_thres_count = 16,
 	.zone3_thres_count = 1,
 	.quirks = BWMON_HAS_GLOBAL_IRQ,
@@ -835,9 +827,6 @@ static const struct icc_bwmon_data sdm845_cpu_bwmon_data = {
 static const struct icc_bwmon_data sdm845_llcc_bwmon_data = {
 	.sample_ms = 4,
 	.count_unit_kb = 1024,
-	.default_highbw_kbps = 800 * 1024, /* 800 MBps */
-	.default_medbw_kbps = 256 * 1024, /* 256 MBps */
-	.default_lowbw_kbps = 0,
 	.zone1_thres_count = 16,
 	.zone3_thres_count = 1,
 	.regmap_fields = sdm845_llcc_bwmon_reg_fields,
@@ -847,9 +836,6 @@ static const struct icc_bwmon_data sdm845_llcc_bwmon_data = {
 static const struct icc_bwmon_data sc7280_llcc_bwmon_data = {
 	.sample_ms = 4,
 	.count_unit_kb = 64,
-	.default_highbw_kbps = 800 * 1024, /* 800 MBps */
-	.default_medbw_kbps = 256 * 1024, /* 256 MBps */
-	.default_lowbw_kbps = 0,
 	.zone1_thres_count = 16,
 	.zone3_thres_count = 1,
 	.quirks = BWMON_NEEDS_FORCE_CLEAR,

---
base-commit: b16049b21162bb649cdd8519642a35972b7910fe
change-id: 20230610-topic-bwmon_opp-f995bbdd18bd

Best regards,
-- 
Konrad Dybcio <konrad.dybcio@...aro.org>
