[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20171115171823.GW983427@devbig577.frc2.facebook.com>
Date: Wed, 15 Nov 2017 09:18:23 -0800
From: Tejun Heo <tj@...nel.org>
To: Shaohua Li <shli@...nel.org>
Cc: axboe@...nel.dk, linux-kernel@...r.kernel.org, kernel-team@...com,
lizefan@...wei.com, hannes@...xchg.org, cgroups@...r.kernel.org,
guro@...com
Subject: [PATCH v2 6/7] blkcg: account requests instead of bios for request
based request_queues
blkcg accounting is currently bio based, which is a poor fit for
request based request_queues: the number of bios doesn't
have much to do with the actual number of IOs issued to the underlying
device (can be significantly higher or lower) and may change depending
on the implementation details of how the bios are issued (e.g. from
the recent split-bios-while-issuing change).
Do cgroup accounting for request based request_queues together with
gendisk accounting on request completion.
This makes cgroup accounting consistent with gendisk accounting and
what's happening on the system.
v2: Use q->request_fn to skip bio based accounting instead of
QUEUE_FLAG_IOSTAT as suggested by Shaohua.
Signed-off-by: Tejun Heo <tj@...nel.org>
Reviewed-by: Shaohua Li <shli@...nel.org>
---
block/blk-core.c | 3 +++
include/linux/blk-cgroup.h | 18 +++++++++++++++++-
2 files changed, 20 insertions(+), 1 deletion(-)
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2429,6 +2429,7 @@ void blk_account_io_completion(struct re
cpu = part_stat_lock();
part = req->part;
part_stat_add(cpu, part, sectors[rw], bytes >> 9);
+ blkcg_account_io_completion(req, bytes);
part_stat_unlock();
}
}
@@ -2454,6 +2455,8 @@ void blk_account_io_done(struct request
part_round_stats(req->q, cpu, part);
part_dec_in_flight(req->q, part, rw);
+ blkcg_account_io_done(req);
+
hd_struct_put(part);
part_stat_unlock();
}
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -715,7 +715,8 @@ static inline bool blkcg_bio_issue_check
throtl = blk_throtl_bio(q, blkg, bio);
- if (!throtl) {
+ /* if @q does io stat, blkcg stats are updated together with them */
+ if (!q->request_fn && !throtl) {
blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
bio->bi_iter.bi_size);
blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
@@ -764,6 +765,17 @@ static inline void blk_rq_disassociate_b
rq->blkg = NULL;
}
+static inline void blkcg_account_io_completion(struct request *rq,
+ unsigned int bytes)
+{
+ blkg_rwstat_add(&rq->blkg->stat_bytes, rq_data_dir(rq), bytes);
+}
+
+static inline void blkcg_account_io_done(struct request *rq)
+{
+ blkg_rwstat_add(&rq->blkg->stat_ios, rq_data_dir(rq), 1);
+}
+
#else /* CONFIG_BLK_CGROUP */
struct blkcg {
@@ -823,6 +835,10 @@ static inline bool blkcg_bio_issue_check
static inline void blk_rq_associate_blkg(struct request *rq, struct blkcg *blkcg) { }
static inline void blk_rq_disassociate_blkg(struct request *rq) { }
+static inline void blkcg_account_io_completion(struct request *rq,
+ unsigned int bytes) { }
+static inline void blkcg_account_io_done(struct request *rq) { }
+
#define blk_queue_for_each_rl(rl, q) \
for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
Powered by blists - more mailing lists