Date:	Fri, 26 Oct 2012 12:47:48 +0800
From:	Robin Dong <robin.k.dong@...il.com>
To:	linux-kernel@...r.kernel.org
Cc:	Robin Dong <sanbai@...bao.com>, Tejun Heo <tj@...nel.org>,
	Vivek Goyal <vgoyal@...hat.com>, Jens Axboe <axboe@...nel.dk>,
	Tao Ma <boyu.mt@...bao.com>
Subject: [PATCH 2/2 v4] block/throttle: Add IO submitted information in blkio.throttle

From: Robin Dong <sanbai@...bao.com>

Currently, if IO is throttled by blk-throttle, the system admin has no
way of knowing about it and therefore cannot tell the application user
that he/she needs to do something about it.

So this patch adds a new interface named blkio.throttle.io_submitted
(together with blkio.throttle.io_submit_bytes for the byte counts) which
exposes the number of bios that have been submitted into blk-throttle.
The user can then subtract blkio.throttle.io_serviced from it to see how
many IOs are currently throttled.
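
For example, a monitoring tool could read the two files and report the
per-device backlog. The sketch below is only an illustration (not part of
the patch); it assumes the blkio controller is mounted at
/sys/fs/cgroup/blkio and that the new files print the same
"MAJ:MIN <op> <value>" lines as the existing blkio.throttle.io_serviced:

/*
 * Illustration only, not part of the patch: read
 * blkio.throttle.io_submitted and blkio.throttle.io_serviced for a
 * cgroup and print, per "MAJ:MIN <op>" key, how many bios are still
 * held back by blk-throttle (submitted - serviced).
 *
 * Assumed: blkio controller mounted at /sys/fs/cgroup/blkio, and both
 * files printing "MAJ:MIN Op value" lines.
 */
#include <stdio.h>
#include <string.h>

#define CGROUP "/sys/fs/cgroup/blkio"   /* assumed mount point */
#define MAX_KEYS 256

struct entry {
        char key[64];                   /* e.g. "8:0 Read" */
        unsigned long long val;
};

static int read_stat(const char *name, struct entry *e, int max)
{
        char path[256], line[128], dev[32], op[16];
        unsigned long long val;
        FILE *f;
        int n = 0;

        snprintf(path, sizeof(path), "%s/%s", CGROUP, name);
        f = fopen(path, "r");
        if (!f)
                return -1;
        while (n < max && fgets(line, sizeof(line), f)) {
                /* skip lines that are not "MAJ:MIN Op value" */
                if (sscanf(line, "%31s %15s %llu", dev, op, &val) != 3)
                        continue;
                snprintf(e[n].key, sizeof(e[n].key), "%s %s", dev, op);
                e[n].val = val;
                n++;
        }
        fclose(f);
        return n;
}

int main(void)
{
        struct entry sub[MAX_KEYS], srv[MAX_KEYS];
        int nsub, nsrv, i, j;

        nsub = read_stat("blkio.throttle.io_submitted", sub, MAX_KEYS);
        nsrv = read_stat("blkio.throttle.io_serviced", srv, MAX_KEYS);
        if (nsub < 0 || nsrv < 0) {
                perror("reading blkio.throttle files");
                return 1;
        }

        /* submitted - serviced = bios currently sitting in blk-throttle */
        for (i = 0; i < nsub; i++)
                for (j = 0; j < nsrv; j++)
                        if (!strcmp(sub[i].key, srv[j].key))
                                printf("%s: %llu queued\n", sub[i].key,
                                       sub[i].val - srv[j].val);
        return 0;
}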

Cc: Tejun Heo <tj@...nel.org>
Cc: Vivek Goyal <vgoyal@...hat.com>
Cc: Jens Axboe <axboe@...nel.dk>
Signed-off-by: Tao Ma <boyu.mt@...bao.com>
Signed-off-by: Robin Dong <sanbai@...bao.com>
---
v3 <-- v2:
 - Use nr_queued[] of struct throtl_grp for stats instead of adding a new blkg_rwstat.

v4 <-- v3:
 - Add two new blkg_rwstat fields to count the total bios and bytes sent into blk-throttle.

 block/blk-throttle.c |   43 +++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 43 insertions(+), 0 deletions(-)

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 46ddeff..c6391b5 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -46,6 +46,10 @@ struct tg_stats_cpu {
 	struct blkg_rwstat		service_bytes;
 	/* total IOs serviced, post merge */
 	struct blkg_rwstat		serviced;
+	/* total bytes submitted into blk-throttle */
+	struct blkg_rwstat		submit_bytes;
+	/* total IOs submitted into blk-throttle */
+	struct blkg_rwstat		submitted;
 };
 
 struct throtl_grp {
@@ -266,6 +270,8 @@ static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
 
 		blkg_rwstat_reset(&sc->service_bytes);
 		blkg_rwstat_reset(&sc->serviced);
+		blkg_rwstat_reset(&sc->submit_bytes);
+		blkg_rwstat_reset(&sc->submitted);
 	}
 }
 
@@ -699,6 +705,30 @@ static void throtl_update_dispatch_stats(struct throtl_grp *tg, u64 bytes,
 	local_irq_restore(flags);
 }
 
+static void throtl_update_submit_stats(struct throtl_grp *tg, u64 bytes, int rw)
+{
+	struct tg_stats_cpu *stats_cpu;
+	unsigned long flags;
+
+	/* If per cpu stats are not allocated yet, don't do any accounting. */
+	if (tg->stats_cpu == NULL)
+		return;
+
+	/*
+	 * Disabling interrupts to provide mutual exclusion between two
+	 * writes on same cpu. It probably is not needed for 64bit. Not
+	 * optimizing that case yet.
+	 */
+	local_irq_save(flags);
+
+	stats_cpu = this_cpu_ptr(tg->stats_cpu);
+
+	blkg_rwstat_add(&stats_cpu->submitted, rw, 1);
+	blkg_rwstat_add(&stats_cpu->submit_bytes, rw, bytes);
+
+	local_irq_restore(flags);
+}
+
 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 {
 	bool rw = bio_data_dir(bio);
@@ -1084,6 +1114,16 @@ static struct cftype throtl_files[] = {
 		.private = offsetof(struct tg_stats_cpu, serviced),
 		.read_seq_string = tg_print_cpu_rwstat,
 	},
+	{
+		.name = "throttle.io_submit_bytes",
+		.private = offsetof(struct tg_stats_cpu, submit_bytes),
+		.read_seq_string = tg_print_cpu_rwstat,
+	},
+	{
+		.name = "throttle.io_submitted",
+		.private = offsetof(struct tg_stats_cpu, submitted),
+		.read_seq_string = tg_print_cpu_rwstat,
+	},
 	{ }	/* terminate */
 };
 
@@ -1128,6 +1168,8 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 		if (tg_no_rule_group(tg, rw)) {
 			throtl_update_dispatch_stats(tg,
 						     bio->bi_size, bio->bi_rw);
+			throtl_update_submit_stats(tg,
+					bio->bi_size, bio->bi_rw);
 			goto out_unlock_rcu;
 		}
 	}
@@ -1141,6 +1183,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	if (unlikely(!tg))
 		goto out_unlock;
 
+	throtl_update_submit_stats(tg, bio->bi_size, bio->bi_rw);
 	if (tg->nr_queued[rw]) {
 		/*
 		 * There is already another bio queued in same dir. No
-- 
1.7.1
