Message-Id: <1307135209-30539-4-git-send-email-vgoyal@redhat.com>
Date:	Fri,  3 Jun 2011 17:06:44 -0400
From:	Vivek Goyal <vgoyal@...hat.com>
To:	linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org,
	axboe@...nel.dk
Cc:	vgoyal@...hat.com, arighi@...eler.com, fengguang.wu@...el.com,
	jack@...e.cz, akpm@...ux-foundation.org
Subject: [PATCH 3/8] blk-throttle: use IO size and direction as parameters to wait routines

I want to reuse the wait routines for task waits as well. Hence, get rid
of the dependency on a bio being passed in; instead, pass in the IO
direction, the IO size and the number of IOs.

Signed-off-by: Vivek Goyal <vgoyal@...hat.com>
---
 block/blk-throttle.c |   52 +++++++++++++++++++++++--------------------------
 1 files changed, 24 insertions(+), 28 deletions(-)
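
Not part of the patch itself: the wait-time arithmetic that the hunks below
refactor can be modelled in a few lines of standalone C. The sketch below is
only an illustrative userspace model of the bps/iops wait computation with the
new explicit size/count parameters (a single IO direction is assumed for
brevity); HZ, the slice round-up bookkeeping and the names struct tg_model,
wait_bps() and wait_iops() are simplified assumptions for illustration, not
kernel code.

/*
 * Standalone model of the wait-time math in tg_wait_bps_limit() and
 * tg_wait_iops_limit() after this change: the helpers take an IO size
 * and an IO count instead of a struct bio.
 */
#include <stdio.h>
#include <stdint.h>

#define HZ 1000				/* assumed tick rate for the model */

struct tg_model {
	uint64_t bps;			/* bytes-per-second limit */
	unsigned int iops;		/* IOs-per-second limit */
	uint64_t bytes_disp;		/* bytes dispatched in current slice */
	unsigned int io_disp;		/* IOs dispatched in current slice */
};

/* Jiffies to wait before 'sz' more bytes fit under the bps limit. */
static unsigned long wait_bps(struct tg_model *tg, unsigned int sz,
			      unsigned long jiffy_elapsed)
{
	uint64_t bytes_allowed = tg->bps * jiffy_elapsed / HZ;
	uint64_t extra_bytes;
	unsigned long jiffy_wait;

	if (tg->bytes_disp + sz <= bytes_allowed)
		return 0;

	extra_bytes = tg->bytes_disp + sz - bytes_allowed;
	jiffy_wait = (unsigned long)(extra_bytes * HZ / tg->bps);
	return jiffy_wait ? jiffy_wait : 1;	/* wait at least one jiffy */
}

/* Jiffies to wait before 'nr_ios' more IOs fit under the iops limit. */
static unsigned long wait_iops(struct tg_model *tg, unsigned int nr_ios,
			       unsigned long jiffy_elapsed)
{
	uint64_t io_allowed = (uint64_t)tg->iops * jiffy_elapsed / HZ;
	unsigned long jiffy_wait;

	if (tg->io_disp + nr_ios <= io_allowed)
		return 0;

	jiffy_wait = ((tg->io_disp + nr_ios) * HZ) / tg->iops + 1;
	/* assumed clamp: never report less than one jiffy of wait */
	return jiffy_wait > jiffy_elapsed ? jiffy_wait - jiffy_elapsed : 1;
}

int main(void)
{
	struct tg_model tg = {
		.bps = 1 << 20,			/* 1 MiB/s */
		.iops = 100,
		.bytes_disp = 900 * 1024,
		.io_disp = 90,
	};
	unsigned long elapsed = HZ / 2;		/* half a second into the slice */
	unsigned int sz = 256 * 1024, nr_ios = 1;

	unsigned long bps_wait = wait_bps(&tg, sz, elapsed);
	unsigned long iops_wait = wait_iops(&tg, nr_ios, elapsed);

	/* tg_wait_dispatch() returns the larger of the two waits. */
	printf("bps wait: %lu, iops wait: %lu, max: %lu jiffies\n",
	       bps_wait, iops_wait,
	       bps_wait > iops_wait ? bps_wait : iops_wait);
	return 0;
}

Compiled with any C compiler and run, this prints the bps wait, the iops wait
and their maximum, which is the value tg_wait_dispatch() returns after this
patch.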

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 1259ce6..541830c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -591,9 +591,8 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 }
 
 static unsigned long tg_wait_iops_limit(struct throtl_data *td,
-			struct throtl_grp *tg, struct bio *bio)
+			struct throtl_grp *tg, bool rw, unsigned int nr_ios)
 {
-	bool rw = bio_data_dir(bio);
 	unsigned int io_allowed;
 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 	u64 tmp;
@@ -621,11 +620,11 @@ static unsigned long tg_wait_iops_limit(struct throtl_data *td,
 	else
 		io_allowed = tmp;
 
-	if (tg->io_disp[rw] + 1 <= io_allowed)
+	if (tg->io_disp[rw] + nr_ios <= io_allowed)
 		return 0;
 
 	/* Calc approx time to dispatch */
-	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
+	jiffy_wait = ((tg->io_disp[rw] + nr_ios) * HZ)/tg->iops[rw] + 1;
 
 	if (jiffy_wait > jiffy_elapsed)
 		jiffy_wait = jiffy_wait - jiffy_elapsed;
@@ -640,9 +639,8 @@ static unsigned long tg_wait_iops_limit(struct throtl_data *td,
  * to bps limit.
  */
 static unsigned long tg_wait_bps_limit(struct throtl_data *td,
-			struct throtl_grp *tg, struct bio *bio)
+			struct throtl_grp *tg, bool rw, unsigned int sz)
 {
-	bool rw = bio_data_dir(bio);
 	u64 bytes_allowed, extra_bytes, tmp;
 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 
@@ -658,11 +656,11 @@ static unsigned long tg_wait_bps_limit(struct throtl_data *td,
 	do_div(tmp, HZ);
 	bytes_allowed = tmp;
 
-	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed)
+	if (tg->bytes_disp[rw] + sz <= bytes_allowed)
 		return 0;
 
 	/* Calc approx time to dispatch */
-	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
+	extra_bytes = tg->bytes_disp[rw] + sz - bytes_allowed;
 	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
 
 	if (!jiffy_wait)
@@ -690,10 +688,9 @@ static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
  * Returns the number of jiffies one needs to wait before IO can be dispatched.
  * 0 means, IO can be dispatched now.
  */
-static unsigned long
-tg_wait_dispatch(struct throtl_data *td, struct throtl_grp *tg, struct bio *bio)
+static unsigned long tg_wait_dispatch(struct throtl_data *td,
+	struct throtl_grp *tg, bool rw, unsigned int sz, unsigned int nr_ios)
 {
-	bool rw = bio_data_dir(bio);
 	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
 
 	/* If tg->bps = -1, then BW is unlimited */
@@ -712,8 +709,8 @@ tg_wait_dispatch(struct throtl_data *td, struct throtl_grp *tg, struct bio *bio)
 			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
 	}
 
-	bps_wait = tg_wait_bps_limit(td, tg, bio);
-	iops_wait = tg_wait_iops_limit(td, tg, bio);
+	bps_wait = tg_wait_bps_limit(td, tg, rw, sz);
+	iops_wait = tg_wait_iops_limit(td, tg, rw, nr_ios);
 
 	max_wait = max(bps_wait, iops_wait);
 
@@ -723,16 +720,14 @@ tg_wait_dispatch(struct throtl_data *td, struct throtl_grp *tg, struct bio *bio)
 	return max_wait;
 }
 
-static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
+static void throtl_charge_io(struct throtl_grp *tg, bool rw, unsigned int sz,
+				unsigned int nr_ios, bool sync)
 {
-	bool rw = bio_data_dir(bio);
-	bool sync = bio->bi_rw & REQ_SYNC;
-
-	/* Charge the bio to the group */
-	tg->bytes_disp[rw] += bio->bi_size;
-	tg->io_disp[rw]++;
+	/* Charge the io to the group */
+	tg->bytes_disp[rw] += sz;
+	tg->io_disp[rw] += nr_ios;
 
-	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
+	blkiocg_update_dispatch_stats(&tg->blkg, sz, rw, sync);
 }
 
 static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
@@ -754,10 +749,10 @@ static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
 	struct bio *bio;
 
 	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
-		read_wait = tg_wait_dispatch(td, tg, bio);
+		read_wait = tg_wait_dispatch(td, tg, READ, bio->bi_size, 1);
 
 	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
-		write_wait = tg_wait_dispatch(td, tg, bio);
+		write_wait = tg_wait_dispatch(td, tg, WRITE, bio->bi_size, 1);
 
 	min_wait = min(read_wait, write_wait);
 	disptime = jiffies + min_wait;
@@ -781,7 +776,7 @@ static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
 	BUG_ON(td->nr_queued[rw] <= 0);
 	td->nr_queued[rw]--;
 
-	throtl_charge_bio(tg, bio);
+	throtl_charge_io(tg, rw, bio->bi_size, 1, bio->bi_rw & REQ_SYNC);
 	bio_list_add(bl, bio);
 	bio->bi_rw |= REQ_THROTTLED;
 
@@ -799,7 +794,7 @@ static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
 	/* Try to dispatch 75% READS and 25% WRITES */
 
 	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
-		&& !tg_wait_dispatch(td, tg, bio)) {
+		&& !tg_wait_dispatch(td, tg, READ, bio->bi_size, 1)) {
 
 		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
 		nr_reads++;
@@ -809,7 +804,7 @@ static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
 	}
 
 	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
-		&& !tg_wait_dispatch(td, tg, bio)) {
+		&& !tg_wait_dispatch(td, tg, WRITE, bio->bi_size, 1)) {
 
 		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
 		nr_writes++;
@@ -1165,8 +1160,9 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
 	}
 
 	/* Bio is within rate limit of group */
-	if (!tg_wait_dispatch(td, tg, bio)) {
-		throtl_charge_bio(tg, bio);
+	if (!tg_wait_dispatch(td, tg, rw, bio->bi_size, 1)) {
+		throtl_charge_io(tg, rw, bio->bi_size, 1,
+					bio->bi_rw & REQ_SYNC);
 
 		/*
 		 * We need to trim slice even when bios are not being queued
-- 
1.7.4.4
