Message-Id: <20241006152849.247152-2-yizhou.tang@shopee.com>
Date: Sun, 6 Oct 2024 23:28:47 +0800
From: Tang Yizhou <yizhou.tang@...pee.com>
To: jack@...e.cz,
hch@...radead.org,
willy@...radead.org,
akpm@...ux-foundation.org,
chandan.babu@...cle.com
Cc: linux-kernel@...r.kernel.org,
linux-fsdevel@...r.kernel.org,
linux-xfs@...r.kernel.org,
Tang Yizhou <yizhou.tang@...pee.com>
Subject: [PATCH v2 1/3] mm/page-writeback.c: Rename BANDWIDTH_INTERVAL to BW_DIRTYLIMIT_INTERVAL
From: Tang Yizhou <yizhou.tang@...pee.com>
The name of the BANDWIDTH_INTERVAL macro is misleading, as it is used
not only in the bandwidth update functions wb_update_bandwidth() and
__wb_update_bandwidth(), but also in the dirty limit update function
domain_update_dirty_limit().
Rename BANDWIDTH_INTERVAL to BW_DIRTYLIMIT_INTERVAL to make this clear.
This patch doesn't introduce any behavioral changes.
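For reference, the interval evaluates to roughly 200 ms for all common
CONFIG_HZ values; the max() only matters for a hypothetical HZ < 5,
where integer division would truncate HZ/5 to 0. A minimal userspace
sketch of the arithmetic (the INTERVAL() helper below is illustrative,
not kernel code):

  #include <stdio.h>

  /* Illustrative stand-in for max(HZ/5, 1) from mm/page-writeback.c. */
  #define INTERVAL(hz) ((hz) / 5 >= 1 ? (hz) / 5 : 1)

  int main(void)
  {
          static const int hz_values[] = { 100, 250, 300, 1000 };

          for (int i = 0; i < 4; i++) {
                  int hz = hz_values[i];

                  /* 1 jiffy = 1000/HZ ms, so convert back to ms. */
                  printf("HZ=%4d -> %3d jiffies = %d ms\n",
                         hz, INTERVAL(hz), INTERVAL(hz) * 1000 / hz);
          }
          return 0;
  }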
v2: Rename UPDATE_INTERVAL to BW_DIRTYLIMIT_INTERVAL.
Signed-off-by: Tang Yizhou <yizhou.tang@...pee.com>
---
mm/page-writeback.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index fcd4c1439cb9..3af7bc078dc0 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -54,9 +54,9 @@
#define DIRTY_POLL_THRESH (128 >> (PAGE_SHIFT - 10))
/*
- * Estimate write bandwidth at 200ms intervals.
+ * Estimate write bandwidth or update dirty limit at 200ms intervals.
*/
-#define BANDWIDTH_INTERVAL max(HZ/5, 1)
+#define BW_DIRTYLIMIT_INTERVAL max(HZ/5, 1)
#define RATELIMIT_CALC_SHIFT 10
@@ -1331,11 +1331,11 @@ static void domain_update_dirty_limit(struct dirty_throttle_control *dtc,
/*
* check locklessly first to optimize away locking for the most time
*/
- if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
+ if (time_before(now, dom->dirty_limit_tstamp + BW_DIRTYLIMIT_INTERVAL))
return;
spin_lock(&dom->lock);
- if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
+ if (time_after_eq(now, dom->dirty_limit_tstamp + BW_DIRTYLIMIT_INTERVAL)) {
update_dirty_limit(dtc);
dom->dirty_limit_tstamp = now;
}
@@ -1928,7 +1928,7 @@ static int balance_dirty_pages(struct bdi_writeback *wb,
wb->dirty_exceeded = gdtc->dirty_exceeded ||
(mdtc && mdtc->dirty_exceeded);
if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
- BANDWIDTH_INTERVAL))
+ BW_DIRTYLIMIT_INTERVAL))
__wb_update_bandwidth(gdtc, mdtc, true);
/* throttle according to the chosen dtc */
@@ -2705,7 +2705,7 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
* writeback bandwidth is updated once in a while.
*/
if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
- BANDWIDTH_INTERVAL))
+ BW_DIRTYLIMIT_INTERVAL))
wb_update_bandwidth(wb);
return ret;
}
@@ -3057,14 +3057,14 @@ static void wb_inode_writeback_end(struct bdi_writeback *wb)
atomic_dec(&wb->writeback_inodes);
/*
* Make sure estimate of writeback throughput gets updated after
- * writeback completed. We delay the update by BANDWIDTH_INTERVAL
+ * writeback completed. We delay the update by BW_DIRTYLIMIT_INTERVAL
* (which is the interval other bandwidth updates use for batching) so
* that if multiple inodes end writeback at a similar time, they get
* batched into one bandwidth update.
*/
spin_lock_irqsave(&wb->work_lock, flags);
if (test_bit(WB_registered, &wb->state))
- queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
+ queue_delayed_work(bdi_wq, &wb->bw_dwork, BW_DIRTYLIMIT_INTERVAL);
spin_unlock_irqrestore(&wb->work_lock, flags);
}
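The lockless time check in domain_update_dirty_limit() above is the
usual check-then-lock-then-recheck pattern. A minimal userspace sketch
of that pattern, with a pthread mutex and time(NULL) standing in for
spin_lock() and jiffies (names below are illustrative, not kernel code):

  #include <pthread.h>
  #include <time.h>

  #define UPDATE_INTERVAL 1  /* seconds; stands in for BW_DIRTYLIMIT_INTERVAL */

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static time_t last_update;

  static void do_expensive_update(void)  /* stands in for update_dirty_limit() */
  {
  }

  void maybe_update(void)
  {
          time_t now = time(NULL);

          /* Lockless fast path: the unlocked read of last_update is racy
           * but benign, so most callers return without touching the lock. */
          if (now < last_update + UPDATE_INTERVAL)
                  return;

          pthread_mutex_lock(&lock);
          /* Re-check under the lock: a concurrent caller may have already
           * performed the update. */
          if (now >= last_update + UPDATE_INTERVAL) {
                  do_expensive_update();
                  last_update = now;
          }
          pthread_mutex_unlock(&lock);
  }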
--
2.25.1