Message-Id: <f58ff36d7e24716994f2de22be461602fb49b6d5.1606186717.git.baolin.wang@linux.alibaba.com>
Date: Tue, 24 Nov 2020 11:33:36 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: axboe@...nel.dk, tj@...nel.org
Cc: baolin.wang@...ux.alibaba.com, baolin.wang7@...il.com,
linux-block@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH 7/7] blk-iocost: Factor out the base vrate change into a separate function
Factor out the base vrate change code into a separate function
to simplify ioc_timer_fn().
No functional change.
Signed-off-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
---
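Note for reviewers: below is a minimal userspace sketch of the policy the
new helper encapsulates, for poking at the arithmetic outside the kernel.
It is not kernel code: CLAMP_ADJ_PCT stands in for VRATE_CLAMP_ADJ_PCT,
the adjustment table is an illustrative stand-in for vrate_adj_pct[] (the
real table's values differ), and plain division replaces the kernel's
div64_u64()/DIV64_U64_ROUND_UP() helpers.

/*
 * Standalone illustration (plain C99) of ioc_refresh_base_vrate():
 * out-of-bounds vrate is walked toward the bound a few percent per
 * timer period; in-bounds vrate is scaled by a percentage keyed off
 * |busy_level|. Table values here are made up for demonstration.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define CLAMP_ADJ_PCT 4		/* % moved toward a bound per period */

static const uint32_t adj_pct_tbl[] = { 0, 1, 2, 4, 8, 16 };
#define TBL_LEN (sizeof(adj_pct_tbl) / sizeof(adj_pct_tbl[0]))

static uint64_t refresh_base_vrate(uint64_t vrate, uint64_t vmin,
				   uint64_t vmax, int busy_level)
{
	if (vrate < vmin) {
		/* approach the lower bound gradually, never overshoot */
		vrate = vrate * (100 + CLAMP_ADJ_PCT) / 100;
		return vrate < vmin ? vrate : vmin;
	}
	if (vrate > vmax) {
		/* approach the upper bound gradually, never undershoot */
		vrate = vrate * (100 - CLAMP_ADJ_PCT) / 100;
		return vrate > vmax ? vrate : vmax;
	}

	/* in bounds: pick an adjustment percentage from the table */
	size_t idx = (size_t)abs(busy_level);
	if (idx >= TBL_LEN)
		idx = TBL_LEN - 1;
	uint32_t adj = adj_pct_tbl[idx];

	/* positive busy_level throttles vrate, negative speeds it up */
	vrate = vrate * (busy_level > 0 ? 100 - adj : 100 + adj) / 100;
	if (vrate < vmin)
		vrate = vmin;
	if (vrate > vmax)
		vrate = vmax;
	return vrate;
}

int main(void)
{
	uint64_t vrate = 500, vmin = 1000, vmax = 4000;

	/* an out-of-bounds vrate converges to vmin over several periods */
	while (vrate < vmin)
		vrate = refresh_base_vrate(vrate, vmin, vmax, 0);
	printf("converged to %llu\n", (unsigned long long)vrate);

	/* sustained busy_level > 0 pulls vrate down within the bounds */
	vrate = refresh_base_vrate(4000, vmin, vmax, 5);
	printf("busy adjustment: %llu\n", (unsigned long long)vrate);
	return 0;
}

The behavior is unchanged by this patch; the sketch only mirrors the
structure that ioc_timer_fn() previously open-coded.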
block/blk-iocost.c | 78 ++++++++++++++++++++++++++++--------------------------
1 file changed, 41 insertions(+), 37 deletions(-)
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index db4f894..739c8d4 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -968,6 +968,44 @@ static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
}
+static void ioc_refresh_base_vrate(struct ioc *ioc, u32 rq_wait_pct)
+{
+ u64 vrate = ioc->vtime_base_rate;
+ u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
+
+ /* rq_wait signal is always reliable, ignore user vrate_min */
+ if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
+ vrate_min = VRATE_MIN;
+
+ /*
+ * If vrate is out of bounds, apply clamp gradually as the
+ * bounds can change abruptly. Otherwise, apply busy_level
+ * based adjustment.
+ */
+ if (vrate < vrate_min) {
+ vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
+ vrate = min(vrate, vrate_min);
+ } else if (vrate > vrate_max) {
+ vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
+ vrate = max(vrate, vrate_max);
+ } else {
+ int idx = min_t(int, abs(ioc->busy_level),
+ ARRAY_SIZE(vrate_adj_pct) - 1);
+ u32 adj_pct = vrate_adj_pct[idx];
+
+ if (ioc->busy_level > 0)
+ adj_pct = 100 - adj_pct;
+ else
+ adj_pct = 100 + adj_pct;
+
+ vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
+ vrate_min, vrate_max);
+ }
+
+ ioc->vtime_base_rate = vrate;
+ ioc_refresh_margins(ioc);
+}
+
/* take a snapshot of the current [v]time and vrate */
static void ioc_now(struct ioc *ioc, struct ioc_now *now)
{
@@ -2320,45 +2358,11 @@ static void ioc_timer_fn(struct timer_list *timer)
ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
- u64 vrate = ioc->vtime_base_rate;
- u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
-
- /* rq_wait signal is always reliable, ignore user vrate_min */
- if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
- vrate_min = VRATE_MIN;
-
- /*
- * If vrate is out of bounds, apply clamp gradually as the
- * bounds can change abruptly. Otherwise, apply busy_level
- * based adjustment.
- */
- if (vrate < vrate_min) {
- vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT),
- 100);
- vrate = min(vrate, vrate_min);
- } else if (vrate > vrate_max) {
- vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT),
- 100);
- vrate = max(vrate, vrate_max);
- } else {
- int idx = min_t(int, abs(ioc->busy_level),
- ARRAY_SIZE(vrate_adj_pct) - 1);
- u32 adj_pct = vrate_adj_pct[idx];
-
- if (ioc->busy_level > 0)
- adj_pct = 100 - adj_pct;
- else
- adj_pct = 100 + adj_pct;
+ ioc_refresh_base_vrate(ioc, rq_wait_pct);
- vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
- vrate_min, vrate_max);
- }
-
- trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
+ trace_iocost_ioc_vrate_adj(ioc, ioc->vtime_base_rate,
+ missed_ppm, rq_wait_pct,
nr_lagging, nr_shortages);
-
- ioc->vtime_base_rate = vrate;
- ioc_refresh_margins(ioc);
} else if (ioc->busy_level != prev_busy_level || nr_lagging) {
trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
missed_ppm, rq_wait_pct, nr_lagging,
--
1.8.3.1