[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CACGkMEuAxEEvShwN8Q_k-FKZODesORn4zJG7UFHD-KS8sQXYjg@mail.gmail.com>
Date: Fri, 16 Dec 2022 13:43:00 +0800
From: Jason Wang <jasowang@...hat.com>
To: Xie Yongji <xieyongji@...edance.com>
Cc: mst@...hat.com, tglx@...utronix.de, hch@....de,
virtualization@...ts.linux-foundation.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 09/11] vduse: Add enable_irq_wq sysfs interface for virtqueues
On Mon, Dec 5, 2022 at 5:03 PM Xie Yongji <xieyongji@...edance.com> wrote:
>
> Add an enable_irq_wq sysfs interface to control whether
> to use a workqueue to inject irqs or not. The vhost-vdpa
> case can benefit from it.
Do we have a benchmark result for this?
Or I wonder if we can extend set_vq_cb() by associating an eventfd,
so that VDUSE can signal that eventfd directly?
Thanks
>
> Signed-off-by: Xie Yongji <xieyongji@...edance.com>
> ---
> drivers/vdpa/vdpa_user/vduse_dev.c | 50 +++++++++++++++++++++++++++++-
> 1 file changed, 49 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
> index c65f84100e30..ed06c7afd484 100644
> --- a/drivers/vdpa/vdpa_user/vduse_dev.c
> +++ b/drivers/vdpa/vdpa_user/vduse_dev.c
> @@ -62,6 +62,7 @@ struct vduse_virtqueue {
> struct cpumask irq_affinity;
> spinlock_t irq_affinity_lock;
> struct kobject kobj;
> + bool enable_irq_wq;
> };
>
> struct vduse_dev;
> @@ -1013,6 +1014,26 @@ static int vduse_dev_queue_irq_work(struct vduse_dev *dev,
> return ret;
> }
>
> +static int vduse_dev_inject_vq_irq(struct vduse_dev *dev,
> + struct vduse_virtqueue *vq)
> +{
> + int ret = -EINVAL;
> +
> + down_read(&dev->rwsem);
> + if (!(dev->status & VIRTIO_CONFIG_S_DRIVER_OK))
> + goto unlock;
> +
> + ret = 0;
> + spin_lock_irq(&vq->irq_lock);
> + if (vq->ready && vq->cb.callback)
> + vq->cb.callback(vq->cb.private);
> + spin_unlock_irq(&vq->irq_lock);
> +unlock:
> + up_read(&dev->rwsem);
> +
> + return ret;
> +}
> +
> static int vduse_dev_dereg_umem(struct vduse_dev *dev,
> u64 iova, u64 size)
> {
> @@ -1278,8 +1299,12 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
> break;
>
> index = array_index_nospec(index, dev->vq_num);
> - ret = vduse_dev_queue_irq_work(dev, &dev->vqs[index]->inject,
> + if (dev->vqs[index]->enable_irq_wq)
> + ret = vduse_dev_queue_irq_work(dev,
> + &dev->vqs[index]->inject,
> dev->vqs[index]->irq_effective_cpu);
> + else
> + ret = vduse_dev_inject_vq_irq(dev, dev->vqs[index]);
> break;
> }
> case VDUSE_IOTLB_REG_UMEM: {
> @@ -1420,6 +1445,26 @@ static const struct file_operations vduse_dev_fops = {
> .llseek = noop_llseek,
> };
>
> +static ssize_t enable_irq_wq_show(struct vduse_virtqueue *vq, char *buf)
> +{
> + return sprintf(buf, "%d\n", vq->enable_irq_wq);
> +}
> +
> +static ssize_t enable_irq_wq_store(struct vduse_virtqueue *vq,
> + const char *buf, size_t count)
> +{
> + bool enabled;
> + int ret;
> +
> + ret = kstrtobool(buf, &enabled);
> + if (ret)
> + return ret;
> +
> + vq->enable_irq_wq = enabled;
> +
> + return count;
> +}
> +
> static ssize_t irq_cb_affinity_show(struct vduse_virtqueue *vq, char *buf)
> {
> return sprintf(buf, "%*pb\n", cpumask_pr_args(&vq->irq_affinity));
> @@ -1480,10 +1525,12 @@ struct vq_sysfs_entry {
> static struct vq_sysfs_entry irq_cb_affinity_attr = __ATTR_RO(irq_cb_affinity);
> static struct vq_sysfs_entry irq_cb_effective_affinity_attr =
> __ATTR_RW(irq_cb_effective_affinity);
> +static struct vq_sysfs_entry enable_irq_wq_attr = __ATTR_RW(enable_irq_wq);
>
> static struct attribute *vq_attrs[] = {
> &irq_cb_affinity_attr.attr,
> &irq_cb_effective_affinity_attr.attr,
> + &enable_irq_wq_attr.attr,
> NULL,
> };
> ATTRIBUTE_GROUPS(vq);
> @@ -1565,6 +1612,7 @@ static int vduse_dev_init_vqs(struct vduse_dev *dev, u32 vq_align, u32 vq_num)
>
> dev->vqs[i]->index = i;
> dev->vqs[i]->irq_effective_cpu = -1;
> + dev->vqs[i]->enable_irq_wq = true;
> INIT_WORK(&dev->vqs[i]->inject, vduse_vq_irq_inject);
> INIT_WORK(&dev->vqs[i]->kick, vduse_vq_kick_work);
> spin_lock_init(&dev->vqs[i]->kick_lock);
> --
> 2.20.1
>
Powered by blists - more mailing lists