Message-ID: <20250223154042.556001-4-lulu@redhat.com>
Date: Sun, 23 Feb 2025 23:36:18 +0800
From: Cindy Lu <lulu@...hat.com>
To: lulu@...hat.com,
jasowang@...hat.com,
mst@...hat.com,
michael.christie@...cle.com,
sgarzare@...hat.com,
linux-kernel@...r.kernel.org,
virtualization@...ts.linux-foundation.org,
netdev@...r.kernel.org
Subject: [PATCH v6 3/6] vhost: Add the cgroup related function

Add back the previously removed cgroup functions to support the kthread
mode. The biggest change in this part is in vhost_attach_cgroups() and
vhost_attach_task_to_cgroups().

These reuse __vhost_worker_flush(), but in this situation the
attachment_cnt is still 0, so add a boolean parameter that lets the
caller skip that check.

The old functions were removed in
commit 6e890c5d5021 ("vhost: use vhost_tasks for worker threads").

Signed-off-by: Cindy Lu <lulu@...hat.com>
---
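As a rough illustration of how this helper is expected to be used once a
kthread-backed worker exists, here is a sketch modeled on the flow that
existed before commit 6e890c5d5021. It is not part of this patch: the
worker-creation function and vhost_run_work_kthread_list() are assumptions
for illustration only.

/* Sketch only: names outside this patch are assumed, not actual code. */
static int vhost_run_work_kthread_list(void *data); /* assumed worker loop */

static int vhost_worker_create_kthread_sketch(struct vhost_worker *worker)
{
	struct task_struct *task;
	int ret;

	/* Run the worker as a kthread, named after the owner process. */
	task = kthread_create(vhost_run_work_kthread_list, worker,
			      "vhost-%d", current->pid);
	if (IS_ERR(task))
		return PTR_ERR(task);

	wake_up_process(task);

	/*
	 * Queue a work item on the new worker so the kthread attaches
	 * itself to the owner's cgroups; the ignore_attachment flush is
	 * needed because no virtqueue is attached yet (attachment_cnt == 0).
	 */
	ret = vhost_attach_task_to_cgroups(worker);
	if (ret)
		kthread_stop(task);
	return ret;
}
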
 drivers/vhost/vhost.c | 42 +++++++++++++++++++++++++++++++++++++-----
 1 file changed, 37 insertions(+), 5 deletions(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 1feba29abf95..adbb957c8b5f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/kthread.h>
+#include <linux/cgroup.h>
 #include <linux/module.h>
 #include <linux/sort.h>
 #include <linux/sched/mm.h>
@@ -269,11 +270,12 @@ EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
  *
  * The worker's flush_mutex must be held.
  */
-static void __vhost_worker_flush(struct vhost_worker *worker)
+static void __vhost_worker_flush(struct vhost_worker *worker,
+				 bool ignore_attachment)
 {
 	struct vhost_flush_struct flush;
 
-	if (!worker->attachment_cnt || worker->killed)
+	if ((!ignore_attachment && !worker->attachment_cnt) || worker->killed)
 		return;
 
 	init_completion(&flush.wait_event);
@@ -292,7 +294,7 @@ static void __vhost_worker_flush(struct vhost_worker *worker)
 static void vhost_worker_flush(struct vhost_worker *worker)
 {
 	mutex_lock(&worker->mutex);
-	__vhost_worker_flush(worker);
+	__vhost_worker_flush(worker, false);
 	mutex_unlock(&worker->mutex);
 }
 
@@ -620,6 +622,36 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
 }
 EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
 
+struct vhost_attach_cgroups_struct {
+	struct vhost_work work;
+	struct task_struct *owner;
+	int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+	struct vhost_attach_cgroups_struct *s;
+
+	s = container_of(work, struct vhost_attach_cgroups_struct, work);
+	s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_attach_task_to_cgroups(struct vhost_worker *worker)
+{
+	struct vhost_attach_cgroups_struct attach;
+
+	attach.owner = current;
+
+	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+	vhost_worker_queue(worker, &attach.work);
+
+	mutex_lock(&worker->mutex);
+	__vhost_worker_flush(worker, true);
+	mutex_unlock(&worker->mutex);
+
+	return attach.ret;
+}
+
 /* Caller should have device mutex */
 bool vhost_dev_has_owner(struct vhost_dev *dev)
 {
@@ -793,7 +825,7 @@ static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
 	/* Make sure new vq queue/flush/poll calls see the new worker */
 	synchronize_rcu();
 	/* Make sure whatever was queued gets run */
-	__vhost_worker_flush(old_worker);
+	__vhost_worker_flush(old_worker, false);
 	old_worker->attachment_cnt--;
 	mutex_unlock(&old_worker->mutex);
 }
@@ -852,7 +884,7 @@ static int vhost_free_worker(struct vhost_dev *dev,
 	 * to zero. Make sure flushes are flushed from the queue before
 	 * freeing.
 	 */
-	__vhost_worker_flush(worker);
+	__vhost_worker_flush(worker, false);
 	mutex_unlock(&worker->mutex);
 
 	vhost_worker_destroy(dev, worker);
--
2.45.0