lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <b92908a766f0cd165a88e102a9b8e8b9245d1172.1456178093.git.shli@fb.com>
Date:	Mon, 22 Feb 2016 14:01:21 -0800
From:	Shaohua Li <shli@...com>
To:	<linux-kernel@...r.kernel.org>, <linux-block@...r.kernel.org>
CC:	<axboe@...nel.dk>, <tj@...nel.org>,
	Vivek Goyal <vgoyal@...hat.com>,
	"jmoyer @ redhat . com" <jmoyer@...hat.com>, <Kernel-team@...com>
Subject: [PATCH V2 06/13] blk-throttle: add per-cgroup data

Currently we only have per-cgroup per-queue data. This adds per-cgroup
data (cgroup weight). Changing the per-cgroup weight will change all
per-cgroup per-queue weights.

Signed-off-by: Shaohua Li <shli@...com>
---
 block/blk-throttle.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 46 insertions(+), 1 deletion(-)

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 43de1dc..a0fd33e 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -112,6 +112,7 @@ struct throtl_io_cost {
 	unsigned int io_disp[2];
 };
 
+/* per cgroup per device data */
 struct throtl_grp {
 	/* must be the first member */
 	struct blkg_policy_data pd;
@@ -155,6 +156,14 @@ struct throtl_grp {
 	unsigned long slice_end[2];
 };
 
+/* per-cgroup data */
+struct throtl_group_data {
+	/* must be the first member */
+	struct blkcg_policy_data cpd;
+
+	unsigned int weight;
+};
+
 enum run_mode {
 	MODE_NONE = 0,
 	MODE_THROTTLE = 1, /* bandwidth/iops based throttle */
@@ -246,6 +255,16 @@ static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
 		return container_of(sq, struct throtl_data, service_queue);
 }
 
+static inline struct throtl_group_data *cpd_to_tgd(struct blkcg_policy_data *cpd)
+{
+	return cpd ? container_of(cpd, struct throtl_group_data, cpd) : NULL;
+}
+
+static inline struct throtl_group_data *blkcg_to_tgd(struct blkcg *blkcg)
+{
+	return cpd_to_tgd(blkcg_to_cpd(blkcg, &blkcg_policy_throtl));
+}
+
 static inline int tg_data_index(struct throtl_grp *tg, bool rw)
 {
 	if (td_weight_based(tg->td))
@@ -385,6 +404,28 @@ static struct bio *throtl_pop_queued(struct list_head *queued,
 	return bio;
 }
 
+static struct blkcg_policy_data *throtl_cpd_alloc(gfp_t gfp)
+{
+	struct throtl_group_data *tgd;
+
+	tgd = kzalloc(sizeof(*tgd), gfp);
+	if (!tgd)
+		return NULL;
+	return &tgd->cpd;
+}
+
+static void throtl_cpd_init(struct blkcg_policy_data *cpd)
+{
+	struct throtl_group_data *tgd = cpd_to_tgd(cpd);
+
+	tgd->weight = DFT_WEIGHT;
+}
+
+static void throtl_cpd_free(struct blkcg_policy_data *cpd)
+{
+	kfree(cpd_to_tgd(cpd));
+}
+
 /* init a service_queue, assumes the caller zeroed it */
 static void throtl_service_queue_init(struct throtl_service_queue *sq)
 {
@@ -449,7 +490,7 @@ static void throtl_pd_init(struct blkg_policy_data *pd)
 	sq->parent_sq = &td->service_queue;
 	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
 		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
-	sq->weight = DFT_WEIGHT;
+	sq->weight = blkcg_to_tgd(blkg->blkcg)->weight;
 	sq->acting_weight = 0;
 	tg->td = td;
 }
@@ -1677,6 +1718,10 @@ static struct blkcg_policy blkcg_policy_throtl = {
 	.dfl_cftypes		= throtl_files,
 	.legacy_cftypes		= throtl_legacy_files,
 
+	.cpd_alloc_fn		= throtl_cpd_alloc,
+	.cpd_init_fn		= throtl_cpd_init,
+	.cpd_free_fn		= throtl_cpd_free,
+
 	.pd_alloc_fn		= throtl_pd_alloc,
 	.pd_init_fn		= throtl_pd_init,
 	.pd_online_fn		= throtl_pd_online,
-- 
2.6.5

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ