Message-ID: <20240909013531.1243525-7-lulu@redhat.com>
Date: Mon, 9 Sep 2024 09:33:45 +0800
From: Cindy Lu <lulu@...hat.com>
To: lulu@...hat.com,
jasowang@...hat.com,
mst@...hat.com,
michael.christie@...cle.com,
linux-kernel@...r.kernel.org,
virtualization@...ts.linux-foundation.org
Subject: [PATCH v1 6/7] vhost: Add kthread support in function vhost_worker_create

Split vhost_worker_create() to support both the vhost_task and kthread
backends: rename the current task-based implementation to
vhost_worker_create_task() and bring back the pre-vhost_task
implementation as vhost_worker_create_kthread(). The new
vhost_worker_create() selects which one to use based on the value of
the use_kthread parameter.
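
For reference, this is roughly how such a switch could be wired up as a
module parameter; the actual use_kthread definition belongs to an
earlier patch in this series, so treat the snippet below as an
illustrative sketch rather than the code added here:

    #include <linux/module.h>

    /* Hypothetical sketch: a boolean module parameter selecting the
     * worker backend. The real knob is defined elsewhere in the series.
     */
    static bool use_kthread;
    module_param(use_kthread, bool, 0444);
    MODULE_PARM_DESC(use_kthread,
                     "Create vhost workers as kthreads instead of vhost_tasks");

With a switch like that in place, vhost_worker_create() below simply
branches on use_kthread at worker-creation time.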
The original kthread-based vhost_worker_create() was converted to use
vhost_task in commit 6e890c5d5021 ("vhost: use vhost_tasks for worker
threads"), and was subsequently changed by
commit 1cdaafa1b8b ("vhost: replace single worker pointer with xarray")
commit c011bb669dd ("vhost: dynamically allocate vhost_worker")

Signed-off-by: Cindy Lu <lulu@...hat.com>
---
 drivers/vhost/vhost.c | 55 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 54 insertions(+), 1 deletion(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index f05545b125f0..bf1e971cb06f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -807,7 +807,8 @@ static void vhost_workers_free(struct vhost_dev *dev)
 	else
 		vhost_workers_free_task(dev);
 }
-static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
+
+static struct vhost_worker *vhost_worker_create_task(struct vhost_dev *dev)
 {
 	struct vhost_worker *worker;
 	struct vhost_task *vtsk;
@@ -848,6 +849,50 @@ static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
 	return NULL;
 }
 
+static struct vhost_worker *vhost_worker_create_kthread(struct vhost_dev *dev)
+{
+	struct vhost_worker *worker;
+	struct task_struct *task;
+	int ret;
+	u32 id;
+
+	worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
+	if (!worker)
+		return NULL;
+
+	worker->dev = dev;
+	worker->kcov_handle = kcov_common_handle();
+
+	mutex_init(&worker->mutex);
+	init_llist_head(&worker->work_list);
+
+	task = kthread_create(vhost_run_work_kthread_list, worker, "vhost-%d",
+			      current->pid);
+	if (IS_ERR(task)) {
+		ret = PTR_ERR(task);
+		goto free_worker;
+	}
+
+	worker->task = task;
+	wake_up_process(task); /* avoid contributing to loadavg */
+	ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
+	if (ret < 0)
+		goto stop_worker;
+	worker->id = id;
+
+	ret = vhost_attach_cgroups(dev);
+	if (ret)
+		goto stop_worker;
+
+	return worker;
+
+stop_worker:
+	kthread_stop(worker->task);
+free_worker:
+	kfree(worker);
+	return NULL;
+}
+
 /* Caller must have device mutex */
 static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
 				     struct vhost_worker *worker)
@@ -936,6 +981,14 @@ static int vhost_vq_attach_worker(struct vhost_virtqueue *vq,
 	return 0;
 }
 
+static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
+{
+	if (use_kthread)
+		return vhost_worker_create_kthread(dev);
+	else
+		return vhost_worker_create_task(dev);
+}
+
 /* Caller must have device mutex */
 static int vhost_new_worker(struct vhost_dev *dev,
 			    struct vhost_worker_state *info)
--
2.45.0