Message-Id: <1436637654-28110-12-git-send-email-tj@kernel.org>
Date: Sat, 11 Jul 2015 14:00:54 -0400
From: Tejun Heo <tj@...nel.org>
To: axboe@...nel.dk
Cc: linux-kernel@...r.kernel.org, kernel-team@...com,
vgoyal@...hat.com, avanzini.arianna@...il.com,
Tejun Heo <tj@...nel.org>
Subject: [PATCH 11/11] blkcg: replace blkcg_policy->cpd_size with ->cpd_alloc/free_fn() methods

Each active policy has a cpd (blkcg_policy_data) on each blkcg.  The
cpd's were allocated by blkcg core, and each policy could request
extra space at the end by setting blkcg_policy->cpd_size larger than
the size of cpd.

This is a bit unusual, but blkg (blkcg_gq) policy data used to be
handled the same way, so it made sense to be consistent; however, blkg
policy data has since switched to alloc/free callbacks.

This patch makes the equivalent change to cpd handling.
blkcg_policy->cpd_alloc/free_fn() methods are added to replace
->cpd_size.  As cpd allocation is now done from the policy side, the
policy can simply allocate a larger area which embeds cpd at the
beginning.

As ->cpd_alloc_fn() may be able to perform all necessary
initializations, this patch makes ->cpd_init_fn() optional.
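
For illustration, the following condensed sketch shows the pattern a
converted policy follows (the cfq hunks below are the actual
conversion in this patch; the example_* names and the weight field
are made up):

  /* hypothetical policy-private data embedding cpd at the beginning */
  struct example_group_data {
          struct blkcg_policy_data cpd;   /* must be the first member */
          unsigned int weight;            /* policy-private fields follow */
  };

  static struct blkcg_policy_data *example_cpd_alloc(gfp_t gfp)
  {
          struct example_group_data *egd;

          /* allocate the embedding structure in one go */
          egd = kzalloc(sizeof(*egd), gfp);
          if (!egd)
                  return NULL;
          return &egd->cpd;
  }

  static void example_cpd_free(struct blkcg_policy_data *cpd)
  {
          kfree(container_of(cpd, struct example_group_data, cpd));
  }

  static struct blkcg_policy blkcg_policy_example = {
          .cpd_alloc_fn   = example_cpd_alloc,
          /* ->cpd_init_fn may now be left NULL */
          .cpd_free_fn    = example_cpd_free,
  };

Because cpd sits at the beginning of the embedding structure, the
container_of() above is effectively a cast, matching how
cpd_to_cfqgd() works in cfq.
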
Signed-off-by: Tejun Heo <tj@...nel.org>
Cc: Vivek Goyal <vgoyal@...hat.com>
Cc: Arianna Avanzini <avanzini.arianna@...il.com>
---
block/blk-cgroup.c | 39 ++++++++++++++++++++++++---------------
block/cfq-iosched.c | 19 ++++++++++++++++++-
include/linux/blk-cgroup.h | 17 ++++++++++-------
3 files changed, 52 insertions(+), 23 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 8173e06..48d95ca 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -813,11 +813,15 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
int i;
mutex_lock(&blkcg_pol_mutex);
+
list_del(&blkcg->all_blkcgs_node);
- mutex_unlock(&blkcg_pol_mutex);
for (i = 0; i < BLKCG_MAX_POLS; i++)
- kfree(blkcg->cpd[i]);
+ if (blkcg->cpd[i])
+ blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
+
+ mutex_unlock(&blkcg_pol_mutex);
+
kfree(blkcg);
}
@@ -850,18 +854,18 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
* check if the policy requires any specific per-cgroup
* data: if it does, allocate and initialize it.
*/
- if (!pol || !pol->cpd_size)
+ if (!pol || !pol->cpd_alloc_fn)
continue;
- BUG_ON(blkcg->cpd[i]);
- cpd = kzalloc(pol->cpd_size, GFP_KERNEL);
+ cpd = pol->cpd_alloc_fn(GFP_KERNEL);
if (!cpd) {
ret = ERR_PTR(-ENOMEM);
goto free_pd_blkcg;
}
blkcg->cpd[i] = cpd;
cpd->plid = i;
- pol->cpd_init_fn(cpd);
+ if (pol->cpd_init_fn)
+ pol->cpd_init_fn(cpd);
}
spin_lock_init(&blkcg->lock);
@@ -877,7 +881,8 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
free_pd_blkcg:
for (i--; i >= 0; i--)
- kfree(blkcg->cpd[i]);
+ if (blkcg->cpd[i])
+ blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
free_blkcg:
kfree(blkcg);
mutex_unlock(&blkcg_pol_mutex);
@@ -1154,11 +1159,11 @@ int blkcg_policy_register(struct blkcg_policy *pol)
blkcg_policy[pol->plid] = pol;
/* allocate and install cpd's */
- if (pol->cpd_size) {
+ if (pol->cpd_alloc_fn) {
list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
struct blkcg_policy_data *cpd;
- cpd = kzalloc(pol->cpd_size, GFP_KERNEL);
+ cpd = pol->cpd_alloc_fn(GFP_KERNEL);
if (!cpd) {
mutex_unlock(&blkcg_pol_mutex);
goto err_free_cpds;
@@ -1180,10 +1185,12 @@ int blkcg_policy_register(struct blkcg_policy *pol)
return 0;
err_free_cpds:
- if (pol->cpd_size) {
+ if (pol->cpd_alloc_fn) {
list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
- kfree(blkcg->cpd[pol->plid]);
- blkcg->cpd[pol->plid] = NULL;
+ if (blkcg->cpd[pol->plid]) {
+ pol->cpd_free_fn(blkcg->cpd[pol->plid]);
+ blkcg->cpd[pol->plid] = NULL;
+ }
}
}
blkcg_policy[pol->plid] = NULL;
@@ -1216,10 +1223,12 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
/* remove cpds and unregister */
mutex_lock(&blkcg_pol_mutex);
- if (pol->cpd_size) {
+ if (pol->cpd_alloc_fn) {
list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
- kfree(blkcg->cpd[pol->plid]);
- blkcg->cpd[pol->plid] = NULL;
+ if (blkcg->cpd[pol->plid]) {
+ pol->cpd_free_fn(blkcg->cpd[pol->plid]);
+ blkcg->cpd[pol->plid] = NULL;
+ }
}
}
blkcg_policy[pol->plid] = NULL;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index dd6ea9e..a4429b3 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1568,6 +1568,16 @@ static void cfqg_stats_init(struct cfqg_stats *stats)
#endif
}
+static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
+{
+ struct cfq_group_data *cgd;
+
+	cgd = kzalloc(sizeof(*cgd), gfp);
+ if (!cgd)
+ return NULL;
+ return &cgd->cpd;
+}
+
static void cfq_cpd_init(struct blkcg_policy_data *cpd)
{
struct cfq_group_data *cgd = cpd_to_cfqgd(cpd);
@@ -1581,6 +1591,11 @@ static void cfq_cpd_init(struct blkcg_policy_data *cpd)
}
}
+static void cfq_cpd_free(struct blkcg_policy_data *cpd)
+{
+ kfree(cpd_to_cfqgd(cpd));
+}
+
static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
{
struct cfq_group *cfqg;
@@ -4649,10 +4664,12 @@ static struct elevator_type iosched_cfq = {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
static struct blkcg_policy blkcg_policy_cfq = {
- .cpd_size = sizeof(struct cfq_group_data),
.cftypes = cfq_blkcg_files,
+ .cpd_alloc_fn = cfq_cpd_alloc,
.cpd_init_fn = cfq_cpd_init,
+ .cpd_free_fn = cfq_cpd_free,
+
.pd_alloc_fn = cfq_pd_alloc,
.pd_init_fn = cfq_pd_init,
.pd_offline_fn = cfq_pd_offline,
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 7988d47..15f2382 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -81,11 +81,11 @@ struct blkg_policy_data {
};
/*
- * Policies that need to keep per-blkcg data which is independent
- * from any request_queue associated to it must specify its size
- * with the cpd_size field of the blkcg_policy structure and
- * embed a blkcg_policy_data in it. cpd_init() is invoked to let
- * each policy handle per-blkcg data.
+ * Policies that need to keep per-blkcg data which is independent of any
+ * request_queue associated with it should implement the cpd_alloc/free_fn()
+ * methods.  A policy can allocate a private data area by allocating a
+ * larger data structure which embeds blkcg_policy_data at the beginning.
+ * cpd_init() is invoked to let each policy handle per-blkcg data.
*/
struct blkcg_policy_data {
/* the blkcg and policy id this per-policy data belongs to */
@@ -124,7 +124,9 @@ struct blkcg_gq {
struct rcu_head rcu_head;
};
+typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
+typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
@@ -134,13 +136,14 @@ typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
struct blkcg_policy {
int plid;
- /* policy specific per-blkcg data size */
- size_t cpd_size;
/* cgroup files for the policy */
struct cftype *cftypes;
/* operations */
+ blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
blkcg_pol_init_cpd_fn *cpd_init_fn;
+ blkcg_pol_free_cpd_fn *cpd_free_fn;
+
blkcg_pol_alloc_pd_fn *pd_alloc_fn;
blkcg_pol_init_pd_fn *pd_init_fn;
blkcg_pol_online_pd_fn *pd_online_fn;
--
2.4.3