Message-ID: <20211004091151-mutt-send-email-mst@kernel.org>
Date: Mon, 4 Oct 2021 09:12:08 -0400
From: "Michael S. Tsirkin" <mst@...hat.com>
To: Mike Christie <michael.christie@...cle.com>
Cc: hdanton@...a.com, hch@...radead.org, stefanha@...hat.com,
jasowang@...hat.com, sgarzare@...hat.com,
virtualization@...ts.linux-foundation.org,
christian.brauner@...ntu.com, axboe@...nel.dk,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH V2 8/9] vhost: move worker thread fields to new struct
On Tue, Sep 21, 2021 at 04:52:17PM -0500, Mike Christie wrote:
> This is just a prep patch. It moves the worker-related fields to a new
> vhost_worker struct and rearranges the code to create some helpers that
> will be used in the next patches.
>
> Signed-off-by: Mike Christie <michael.christie@...cle.com>
> Reviewed-by: Stefan Hajnoczi <stefanha@...hat.com>
Acked-by: Michael S. Tsirkin <mst@...hat.com>
Feel free to merge with other bits.
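
For anyone skimming rather than applying: the net effect is that
dev->worker changes from a task_struct pointer to a pointer to a small
worker object that owns the work list, the kthread, and the kcov
handle. Condensed from the hunks below (not the complete definitions):

	struct vhost_worker {
		struct task_struct	*task;		/* the vhost-%d kthread */
		struct llist_head	work_list;	/* moved from vhost_dev */
		struct vhost_dev	*dev;		/* back-pointer for vhost_worker() */
		u64			kcov_handle;	/* moved from vhost_dev */
	};

so e.g. vhost_work_queue() now goes through the indirection:

	llist_add(&work->node, &dev->worker->work_list);
	wake_up_process(dev->worker->task);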
> ---
> drivers/vhost/vhost.c | 98 ++++++++++++++++++++++++++++---------------
> drivers/vhost/vhost.h | 11 +++--
> 2 files changed, 72 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> index 59edb5a1ffe2..c9a1f706989c 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -263,8 +263,8 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
> * sure it was not in the list.
> * test_and_set_bit() implies a memory barrier.
> */
> - llist_add(&work->node, &dev->work_list);
> - wake_up_process(dev->worker);
> + llist_add(&work->node, &dev->worker->work_list);
> + wake_up_process(dev->worker->task);
> }
> }
> EXPORT_SYMBOL_GPL(vhost_work_queue);
> @@ -272,7 +272,7 @@ EXPORT_SYMBOL_GPL(vhost_work_queue);
> /* A lockless hint for busy polling code to exit the loop */
> bool vhost_has_work(struct vhost_dev *dev)
> {
> - return !llist_empty(&dev->work_list);
> + return dev->worker && !llist_empty(&dev->worker->work_list);
> }
> EXPORT_SYMBOL_GPL(vhost_has_work);
>
> @@ -343,7 +343,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
>
> static int vhost_worker(void *data)
> {
> - struct vhost_dev *dev = data;
> + struct vhost_worker *worker = data;
> + struct vhost_dev *dev = worker->dev;
> struct vhost_work *work, *work_next;
> struct llist_node *node;
>
> @@ -358,7 +359,7 @@ static int vhost_worker(void *data)
> break;
> }
>
> - node = llist_del_all(&dev->work_list);
> + node = llist_del_all(&worker->work_list);
> if (!node)
> schedule();
>
> @@ -368,7 +369,7 @@ static int vhost_worker(void *data)
> llist_for_each_entry_safe(work, work_next, node, node) {
> clear_bit(VHOST_WORK_QUEUED, &work->flags);
> __set_current_state(TASK_RUNNING);
> - kcov_remote_start_common(dev->kcov_handle);
> + kcov_remote_start_common(worker->kcov_handle);
> work->fn(work);
> kcov_remote_stop();
> if (need_resched())
> @@ -487,7 +488,6 @@ void vhost_dev_init(struct vhost_dev *dev,
> dev->byte_weight = byte_weight;
> dev->use_worker = use_worker;
> dev->msg_handler = msg_handler;
> - init_llist_head(&dev->work_list);
> init_waitqueue_head(&dev->wait);
> INIT_LIST_HEAD(&dev->read_list);
> INIT_LIST_HEAD(&dev->pending_list);
> @@ -579,10 +579,60 @@ static void vhost_detach_mm(struct vhost_dev *dev)
> dev->mm = NULL;
> }
>
> +static void vhost_worker_free(struct vhost_dev *dev)
> +{
> + struct vhost_worker *worker = dev->worker;
> +
> + if (!worker)
> + return;
> +
> + dev->worker = NULL;
> + WARN_ON(!llist_empty(&worker->work_list));
> + kthread_stop(worker->task);
> + kfree(worker);
> +}
> +
> +static int vhost_worker_create(struct vhost_dev *dev)
> +{
> + struct vhost_worker *worker;
> + struct task_struct *task;
> + int ret;
> +
> + worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
> + if (!worker)
> + return -ENOMEM;
> +
> + dev->worker = worker;
> + worker->dev = dev;
> + worker->kcov_handle = kcov_common_handle();
> + init_llist_head(&worker->work_list);
> +
> + task = kthread_create(vhost_worker, worker, "vhost-%d", current->pid);
> + if (IS_ERR(task)) {
> + ret = PTR_ERR(task);
> + goto free_worker;
> + }
> +
> + worker->task = task;
> + wake_up_process(task); /* avoid contributing to loadavg */
> +
> + ret = vhost_attach_cgroups(dev);
> + if (ret)
> + goto stop_worker;
> +
> + return 0;
> +
> +stop_worker:
> + kthread_stop(worker->task);
> +free_worker:
> + kfree(worker);
> + dev->worker = NULL;
> + return ret;
> +}
> +
> /* Caller should have device mutex */
> long vhost_dev_set_owner(struct vhost_dev *dev)
> {
> - struct task_struct *worker;
> int err;
>
> /* Is there an owner already? */
> @@ -593,36 +643,21 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
>
> vhost_attach_mm(dev);
>
> - dev->kcov_handle = kcov_common_handle();
> if (dev->use_worker) {
> - worker = kthread_create(vhost_worker, dev,
> - "vhost-%d", current->pid);
> - if (IS_ERR(worker)) {
> - err = PTR_ERR(worker);
> - goto err_worker;
> - }
> -
> - dev->worker = worker;
> - wake_up_process(worker); /* avoid contributing to loadavg */
> -
> - err = vhost_attach_cgroups(dev);
> + err = vhost_worker_create(dev);
> if (err)
> - goto err_cgroup;
> + goto err_worker;
> }
>
> err = vhost_dev_alloc_iovecs(dev);
> if (err)
> - goto err_cgroup;
> + goto err_iovecs;
>
> return 0;
> -err_cgroup:
> - if (dev->worker) {
> - kthread_stop(dev->worker);
> - dev->worker = NULL;
> - }
> +err_iovecs:
> + vhost_worker_free(dev);
> err_worker:
> vhost_detach_mm(dev);
> - dev->kcov_handle = 0;
> err_mm:
> return err;
> }
> @@ -712,12 +747,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
> dev->iotlb = NULL;
> vhost_clear_msg(dev);
> wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
> - WARN_ON(!llist_empty(&dev->work_list));
> - if (dev->worker) {
> - kthread_stop(dev->worker);
> - dev->worker = NULL;
> - dev->kcov_handle = 0;
> - }
> + vhost_worker_free(dev);
> vhost_detach_mm(dev);
> }
> EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> index 638bb640d6b4..102ce25e4e13 100644
> --- a/drivers/vhost/vhost.h
> +++ b/drivers/vhost/vhost.h
> @@ -25,6 +25,13 @@ struct vhost_work {
> unsigned long flags;
> };
>
> +struct vhost_worker {
> + struct task_struct *task;
> + struct llist_head work_list;
> + struct vhost_dev *dev;
> + u64 kcov_handle;
> +};
> +
> /* Poll a file (eventfd or socket) */
> /* Note: there's nothing vhost specific about this structure. */
> struct vhost_poll {
> @@ -148,8 +155,7 @@ struct vhost_dev {
> struct vhost_virtqueue **vqs;
> int nvqs;
> struct eventfd_ctx *log_ctx;
> - struct llist_head work_list;
> - struct task_struct *worker;
> + struct vhost_worker *worker;
> struct vhost_iotlb *umem;
> struct vhost_iotlb *iotlb;
> spinlock_t iotlb_lock;
> @@ -159,7 +165,6 @@ struct vhost_dev {
> int iov_limit;
> int weight;
> int byte_weight;
> - u64 kcov_handle;
> bool use_worker;
> int (*msg_handler)(struct vhost_dev *dev,
> struct vhost_iotlb_msg *msg);
> --
> 2.25.1
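
(Condensed note for other reviewers: vhost_worker_create() and
vhost_worker_free() become the single setup/teardown pair for the
worker. Roughly, with unrelated context elided from the hunks above:

	long vhost_dev_set_owner(struct vhost_dev *dev)
	{
		...
		if (dev->use_worker) {
			err = vhost_worker_create(dev);
			if (err)
				goto err_worker;
		}

		err = vhost_dev_alloc_iovecs(dev);
		if (err)
			goto err_iovecs;
		return 0;

	err_iovecs:
		/* No-op when use_worker was false: dev->worker is NULL. */
		vhost_worker_free(dev);
	err_worker:
		vhost_detach_mm(dev);
		...
	}

vhost_dev_cleanup() calls the same vhost_worker_free(), which clears
dev->worker before stopping the kthread and freeing the struct.)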