Message-Id: <1432333416-6221-8-git-send-email-tj@kernel.org>
Date:	Fri, 22 May 2015 18:23:24 -0400
From:	Tejun Heo <tj@...nel.org>
To:	axboe@...nel.dk
Cc:	linux-kernel@...r.kernel.org, jack@...e.cz, hch@...radead.org,
	hannes@...xchg.org, linux-fsdevel@...r.kernel.org,
	vgoyal@...hat.com, lizefan@...wei.com, cgroups@...r.kernel.org,
	linux-mm@...ck.org, mhocko@...e.cz, clm@...com,
	fengguang.wu@...el.com, david@...morbit.com, gthelen@...gle.com,
	khlebnikov@...dex-team.ru, Tejun Heo <tj@...nel.org>
Subject: [PATCH 07/19] writeback: add dirty_throttle_control->wb_bg_thresh

wb_bg_thresh is currently treated as a second-class citizen.  It's
used only when BDI_CAP_STRICTLIMIT is set, and balance_dirty_pages()
doesn't even calculate it otherwise.  When the cap is set, the
calculated value is not passed around but is instead recalculated
whenever it's used.

wb_position_ratio() calculates it by scaling wb_thresh in proportion
to bg_thresh / thresh.  wb_update_dirty_ratelimit() uses
wb_calc_thresh() on bg_thresh, which should generally lead to a
result similar to the proportional scaling but can also be way off in
the presence of max/min_ratio settings.
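
As a rough illustration (not part of the patch), below is a minimal
userspace sketch of that proportional scaling; the helper name
calc_wb_bg_thresh() and the sample page counts are made up, and plain
uint64_t arithmetic stands in for the kernel's div_u64() on a u64
product:

#include <stdint.h>
#include <stdio.h>

/*
 * Scale wb_thresh by bg_thresh / thresh, the same proportional
 * scaling wb_position_ratio() performs.
 */
static uint64_t calc_wb_bg_thresh(uint64_t wb_thresh, uint64_t bg_thresh,
				  uint64_t thresh)
{
	/* Avoid dividing by zero when thresh is 0. */
	return thresh ? wb_thresh * bg_thresh / thresh : 0;
}

int main(void)
{
	/* Made-up page counts: thresh=1000, bg_thresh=500, wb_thresh=200. */
	printf("wb_bg_thresh = %llu\n",
	       (unsigned long long)calc_wb_bg_thresh(200, 500, 1000));
	return 0;
}

With those made-up numbers the per-wb background threshold works out
to 100 pages, i.e. the global bg_thresh / thresh ratio applied to the
wb's own threshold.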

Avoiding the wb_bg_thresh calculation saves us one u64 multiplication
and division when BDI_CAP_STRICTLIMIT is not set.  Given that
balance_dirty_pages() is already ratelimited, this saving doesn't
justify the incurred extra complexity.

This patch adds wb_bg_thresh to dirty_throttle_control, makes
wb_dirty_limits() always calculate it, and updates the users to use
the pre-calculated value.

Signed-off-by: Tejun Heo <tj@...nel.org>
Cc: Jens Axboe <axboe@...nel.dk>
Cc: Jan Kara <jack@...e.cz>
Cc: Wu Fengguang <fengguang.wu@...el.com>
Cc: Greg Thelen <gthelen@...gle.com>
---
 mm/page-writeback.c | 27 +++++++++++----------------
 1 file changed, 11 insertions(+), 16 deletions(-)

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 126e3c8..3ec9223 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -134,6 +134,7 @@ struct dirty_throttle_control {
 
 	unsigned long		wb_dirty;	/* per-wb counterparts */
 	unsigned long		wb_thresh;
+	unsigned long		wb_bg_thresh;
 };
 
 #define GDTC_INIT(__wb)		.wb = (__wb)
@@ -761,7 +762,6 @@ static unsigned long wb_position_ratio(struct dirty_throttle_control *dtc)
 	 */
 	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
 		long long wb_pos_ratio;
-		unsigned long wb_bg_thresh;
 
 		if (dtc->wb_dirty < 8)
 			return min_t(long long, pos_ratio * 2,
@@ -770,9 +770,8 @@ static unsigned long wb_position_ratio(struct dirty_throttle_control *dtc)
 		if (dtc->wb_dirty >= wb_thresh)
 			return 0;
 
-		wb_bg_thresh = div_u64((u64)wb_thresh * dtc->bg_thresh,
-				       dtc->thresh);
-		wb_setpoint = dirty_freerun_ceiling(wb_thresh, wb_bg_thresh);
+		wb_setpoint = dirty_freerun_ceiling(wb_thresh,
+						    dtc->wb_bg_thresh);
 
 		if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
 			return 0;
@@ -1104,15 +1103,14 @@ static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
 	 *
 	 * We rampup dirty_ratelimit forcibly if wb_dirty is low because
 	 * it's possible that wb_thresh is close to zero due to inactivity
-	 * of backing device (see the implementation of wb_calc_thresh()).
+	 * of backing device.
 	 */
 	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
 		dirty = dtc->wb_dirty;
 		if (dtc->wb_dirty < 8)
 			setpoint = dtc->wb_dirty + 1;
 		else
-			setpoint = (dtc->wb_thresh +
-				    wb_calc_thresh(wb, dtc->bg_thresh)) / 2;
+			setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
 	}
 
 	if (dirty < setpoint) {
@@ -1307,8 +1305,7 @@ static long wb_min_pause(struct bdi_writeback *wb,
 	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
 }
 
-static inline void wb_dirty_limits(struct dirty_throttle_control *dtc,
-				   unsigned long *wb_bg_thresh)
+static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
 {
 	struct bdi_writeback *wb = dtc->wb;
 	unsigned long wb_reclaimable;
@@ -1327,11 +1324,8 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc,
 	 *   at some rate <= (write_bw / 2) for bringing down wb_dirty.
 	 */
 	dtc->wb_thresh = wb_calc_thresh(dtc->wb, dtc->thresh);
-
-	if (wb_bg_thresh)
-		*wb_bg_thresh = dtc->thresh ? div_u64((u64)dtc->wb_thresh *
-						      dtc->bg_thresh,
-						      dtc->thresh) : 0;
+	dtc->wb_bg_thresh = dtc->thresh ?
+		div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
 
 	/*
 	 * In order to avoid the stacked BDI deadlock we need
@@ -1396,10 +1390,11 @@ static void balance_dirty_pages(struct address_space *mapping,
 		global_dirty_limits(&gdtc->bg_thresh, &gdtc->thresh);
 
 		if (unlikely(strictlimit)) {
-			wb_dirty_limits(gdtc, &bg_thresh);
+			wb_dirty_limits(gdtc);
 
 			dirty = gdtc->wb_dirty;
 			thresh = gdtc->wb_thresh;
+			bg_thresh = gdtc->wb_bg_thresh;
 		} else {
 			dirty = gdtc->dirty;
 			thresh = gdtc->thresh;
@@ -1427,7 +1422,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 			wb_start_background_writeback(wb);
 
 		if (!strictlimit)
-			wb_dirty_limits(gdtc, NULL);
+			wb_dirty_limits(gdtc);
 
 		dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) &&
 			((gdtc->dirty > gdtc->thresh) || strictlimit);
-- 
2.4.0

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ