Message-ID: <20241004015937.2286459-7-lulu@redhat.com>
Date: Fri, 4 Oct 2024 09:58:20 +0800
From: Cindy Lu <lulu@...hat.com>
To: lulu@...hat.com,
jasowang@...hat.com,
mst@...hat.com,
michael.christie@...cle.com,
linux-kernel@...r.kernel.org,
virtualization@...ts.linux-foundation.org
Subject: [PATCH v2 6/7] vhost: Add kthread support in function vhost_worker_create
Split the function vhost_worker_create to support both task and kthread
workers: rename the existing task-based implementation to
vhost_worker_create_task, and bring back the old kthread-based
implementation as vhost_worker_create_kthread.
The new vhost_worker_create selects which one to use based on the
value of the enforce_inherit_owner parameter.
The old vhost_worker_create was changed to use vhost_tasks in
commit 6e890c5d5021 ("vhost: use vhost_tasks for worker threads")
and was subsequently changed in
commit 1cdaafa1b8b4 ("vhost: replace single worker pointer with xarray")
commit c011bb669ddc ("vhost: dynamically allocate vhost_worker")
Signed-off-by: Cindy Lu <lulu@...hat.com>
---
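Note: this patch relies on the enforce_inherit_owner parameter introduced
earlier in this series. For readers looking at this patch in isolation, a
minimal sketch of how such a parameter could be declared; this is an
illustration of the assumed declaration, not part of this patch:

	/* Illustrative sketch only: the real declaration is added by an
	 * earlier patch in this series. true selects the vhost_task-based
	 * worker, false falls back to the kthread-based worker.
	 */
	static bool enforce_inherit_owner = true;
	module_param(enforce_inherit_owner, bool, 0444);
	MODULE_PARM_DESC(enforce_inherit_owner,
		"Create vhost workers as vhost_tasks that inherit the owner (true) or as kthreads (false)");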
drivers/vhost/vhost.c | 55 ++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 54 insertions(+), 1 deletion(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index eb30da658bfe..08c9e77916ca 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -808,7 +808,8 @@ static void vhost_workers_free(struct vhost_dev *dev)
else
vhost_workers_free_kthread(dev);
}
-static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
+
+static struct vhost_worker *vhost_worker_create_task(struct vhost_dev *dev)
{
struct vhost_worker *worker;
struct vhost_task *vtsk;
@@ -849,6 +850,50 @@ static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
return NULL;
}
+static struct vhost_worker *vhost_worker_create_kthread(struct vhost_dev *dev)
+{
+ struct vhost_worker *worker;
+ struct task_struct *task;
+ int ret;
+ u32 id;
+
+ worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
+ if (!worker)
+ return NULL;
+
+ worker->dev = dev;
+ worker->kcov_handle = kcov_common_handle();
+
+ mutex_init(&worker->mutex);
+ init_llist_head(&worker->work_list);
+
+ task = kthread_create(vhost_run_work_kthread_list, worker, "vhost-%d",
+ current->pid);
+ if (IS_ERR(task)) {
+ ret = PTR_ERR(task);
+ goto free_worker;
+ }
+
+ worker->task = task;
+ wake_up_process(task); /* avoid contributing to loadavg */
+ ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
+ if (ret < 0)
+ goto stop_worker;
+ worker->id = id;
+
+ ret = vhost_attach_cgroups(dev);
+ if (ret)
+ goto stop_worker;
+
+ return worker;
+
+stop_worker:
+ kthread_stop(worker->task);
+free_worker:
+ kfree(worker);
+ return NULL;
+}
+
/* Caller must have device mutex */
static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
struct vhost_worker *worker)
@@ -937,6 +982,14 @@ static int vhost_vq_attach_worker(struct vhost_virtqueue *vq,
return 0;
}
+static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
+{
+ if (enforce_inherit_owner)
+ return vhost_worker_create_task(dev);
+ else
+ return vhost_worker_create_kthread(dev);
+}
+
/* Caller must have device mutex */
static int vhost_new_worker(struct vhost_dev *dev,
struct vhost_worker_state *info)
--
2.45.0
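For testing, a possible way to exercise the restored kthread path once the
full series is applied, assuming enforce_inherit_owner ends up exposed as a
module parameter of the vhost module (the parameter's placement is an
assumption, not confirmed by this patch alone):

	# load vhost with kthread-based workers instead of vhost_tasks
	modprobe vhost enforce_inherit_owner=0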