Message-Id: <20221104182050.342908-4-longman@redhat.com>
Date: Fri, 4 Nov 2022 14:20:50 -0400
From: Waiman Long <longman@...hat.com>
To: Tejun Heo <tj@...nel.org>, Jens Axboe <axboe@...nel.dk>
Cc: cgroups@...r.kernel.org, linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org, Ming Lei <ming.lei@...hat.com>,
Andy Shevchenko <andriy.shevchenko@...ux.intel.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Michal Koutný <mkoutny@...e.com>,
Hillf Danton <hdanton@...a.com>,
Waiman Long <longman@...hat.com>
Subject: [PATCH v9 3/3] blk-cgroup: Flush stats at blkgs destruction path

As noted by Michal, the blkg_iostat_set's in the lockless list hold
references to their blkg's to protect against their removal. Those
blkg's in turn hold a reference to the blkcg. When a cgroup is being
destroyed, cgroup_rstat_flush() is only called at css_release_work_fn(),
which is run when the blkcg reference count reaches 0. This circular
dependency prevents the blkcg from being freed until some other event
causes cgroup_rstat_flush() to be called to flush out the pending
blkcg stats.
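
For reference, the dependency is created on the update side. Roughly
(a simplified, non-compilable sketch paraphrased from the lockless
list patch earlier in this series; the bis->lqueued/lnode names are
taken from that patch and may differ in detail):

	/*
	 * Queueing a blkg_iostat_set onto the percpu lockless list in
	 * blk_cgroup_bio_start() takes a reference on its blkg, which
	 * in turn pins the blkcg until the list is flushed.
	 */
	if (!READ_ONCE(bis->lqueued)) {
		struct llist_head *lhead = this_cpu_ptr(blkcg->lhead);

		llist_add(&bis->lnode, lhead);
		WRITE_ONCE(bis->lqueued, true);
		percpu_ref_get(&bis->blkg->refcnt);
	}
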
To prevent this delayed blkcg removal, add a new cgroup_rstat_css_flush()
function to flush the stats of a given css and cpu, and call it in the
blkgs destruction path, blkcg_destroy_blkgs(), whenever there are still
pending stats to be flushed. This ensures that the blkcg reference count
can reach 0 ASAP.

Signed-off-by: Waiman Long <longman@...hat.com>
---
block/blk-cgroup.c | 15 ++++++++++++++-
include/linux/cgroup.h | 1 +
kernel/cgroup/rstat.c | 20 ++++++++++++++++++++
3 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 3e03c0d13253..fa0a366e3476 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1084,10 +1084,12 @@ struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css)
*/
static void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
+ int cpu;
+
might_sleep();
+ css_get(&blkcg->css);
spin_lock_irq(&blkcg->lock);
-
while (!hlist_empty(&blkcg->blkg_list)) {
struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
struct blkcg_gq, blkcg_node);
@@ -1110,6 +1112,17 @@ static void blkcg_destroy_blkgs(struct blkcg *blkcg)
}
spin_unlock_irq(&blkcg->lock);
+
+ /*
+ * Flush all the non-empty percpu lockless lists.
+ */
+ for_each_possible_cpu(cpu) {
+ struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
+
+ if (!llist_empty(lhead))
+ cgroup_rstat_css_flush(&blkcg->css, cpu);
+ }
+ css_put(&blkcg->css);
}
/**
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 528bd44b59e2..4a61cc5d1952 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -766,6 +766,7 @@ void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);
+void cgroup_rstat_css_flush(struct cgroup_subsys_state *css, int cpu);
/*
* Basic resource stats.
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 793ecff29038..28033190fb29 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -281,6 +281,26 @@ void cgroup_rstat_flush_release(void)
spin_unlock_irq(&cgroup_rstat_lock);
}
+/**
+ * cgroup_rstat_css_flush - flush stats for the given css and cpu
+ * @css: target css to be flushed
+ * @cpu: the cpu that holds the stats to be flushed
+ *
+ * A lightweight rstat flush operation for a given css and cpu.
+ * Only the cpu_lock is held for mutual exclusion; the cgroup_rstat_lock
+ * isn't used.
+ */
+void cgroup_rstat_css_flush(struct cgroup_subsys_state *css, int cpu)
+{
+ raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
+
+ raw_spin_lock_irq(cpu_lock);
+ rcu_read_lock();
+ css->ss->css_rstat_flush(css, cpu);
+ rcu_read_unlock();
+ raw_spin_unlock_irq(cpu_lock);
+}
+
int cgroup_rstat_init(struct cgroup *cgrp)
{
int cpu;
--
2.31.1