Message-ID: <CAFQAk7hKTnzyc8Vnnp7UNhjZBqYQC_dukBOEgR255izUNk0_Qw@mail.gmail.com>
Date: Sun, 11 Dec 2022 19:01:00 +0800
From: Jiachen Zhang <zhangjiachen.jaycee@...edance.com>
To: Vivek Goyal <vgoyal@...hat.com>,
Stefan Hajnoczi <stefanha@...hat.com>,
Miklos Szeredi <miklos@...redi.hu>
Cc: virtualization@...ts.linux-foundation.org,
"open list:FUSE: FILESYSTEM IN USERSPACE"
<linux-fsdevel@...r.kernel.org>,
open list <linux-kernel@...r.kernel.org>,
Connor Kuehl <ckuehl@...hat.com>
Subject: Re: [PATCH] virtiofs: enable multiple request queues
On Sun, Dec 11, 2022 at 6:40 PM Jiachen Zhang
<zhangjiachen.jaycee@...edance.com> wrote:
>
> Support multiple virtio-fs request virtqueues and distribute requests
> across them automatically based on IRQ affinity.
>
> This commit is based on Connor's patch from the virtio-fs mailing list,
> and additionally integrates the cpu-to-vq map into struct virtio_fs so
> that the multi-queue feature works with multiple virtio-fs mounts.
>
> Link: https://www.mail-archive.com/virtio-fs@redhat.com/msg03320.html
> Suggested-by: Stefan Hajnoczi <stefanha@...hat.com>
> Cc: Connor Kuehl <ckuehl@...hat.com>
> Signed-off-by: Jiachen Zhang <zhangjiachen.jaycee@...edance.com>
> ---
Hi all,
The corresponding QEMU virtiofsd changes can be found on the qemu-devel
mailing list:
https://lore.kernel.org/qemu-devel/20221211104743.27333-1-zhangjiachen.jaycee@bytedance.com/
To enable the multi-queue feature with QEMU-emulated virtio-fs devices,
the number of request queues must be set on both the QEMU vhost-user-fs
device and virtiofsd. For example, to set up 16 virtio-fs request
queues, apply the kernel patch in this mail, configure the QEMU device
with '-device vhost-user-fs-pci,chardev=char0,tag=myfs,num-request-queues=16',
and start virtiofsd with '-o num_request_queues=16', as in the sketch
below.
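A minimal end-to-end invocation might look like the following (the
paths, socket name, and memory size here are illustrative, not taken
from the patch; note that vhost-user-fs requires a shared memory
backend):

  # virtiofsd side, exporting /path/to/shared with 16 request queues
  ./virtiofsd --socket-path=/tmp/vhostqemu -o source=/path/to/shared \
      -o num_request_queues=16

  # QEMU side, with a matching num-request-queues on the device
  qemu-system-x86_64 -enable-kvm -m 4G \
      -object memory-backend-memfd,id=mem,size=4G,share=on \
      -numa node,memdev=mem \
      -chardev socket,id=char0,path=/tmp/vhostqemu \
      -device vhost-user-fs-pci,chardev=char0,tag=myfs,num-request-queues=16 \
      ...   # remaining machine options

The guest then mounts the share with 'mount -t virtiofs myfs /mnt'.
Since each request queue gets its own MSI-X vector, the IRQ affinity
that drives the cpu-to-vq map can be inspected in the guest's
/proc/interrupts.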
Thanks,
Jiachen
> fs/fuse/virtio_fs.c | 37 +++++++++++++++++++++++++++++--------
> 1 file changed, 29 insertions(+), 8 deletions(-)
>
> diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
> index 4d8d4f16c727..410968dede0c 100644
> --- a/fs/fuse/virtio_fs.c
> +++ b/fs/fuse/virtio_fs.c
> @@ -32,8 +32,9 @@ static DEFINE_MUTEX(virtio_fs_mutex);
> static LIST_HEAD(virtio_fs_instances);
>
> enum {
> - VQ_HIPRIO,
> - VQ_REQUEST
> + VQ_HIPRIO = 0,
> + /* TODO add VQ_NOTIFICATION according to the virtio 1.2 spec. */
> + VQ_REQUEST = 1,
> };
>
> #define VQ_NAME_LEN 24
> @@ -59,6 +60,7 @@ struct virtio_fs {
> struct list_head list; /* on virtio_fs_instances */
> char *tag;
> struct virtio_fs_vq *vqs;
> + struct virtio_fs_vq * __percpu *vq_proxy;
> unsigned int nvqs; /* number of virtqueues */
> unsigned int num_request_queues; /* number of request queues */
> struct dax_device *dax_dev;
> @@ -686,6 +688,7 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
> struct virtqueue **vqs;
> vq_callback_t **callbacks;
> const char **names;
> + struct irq_affinity desc = { .pre_vectors = 1, .nr_sets = 1, };
> unsigned int i;
> int ret = 0;
>
> @@ -694,11 +697,16 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
> if (fs->num_request_queues == 0)
> return -EINVAL;
>
> + fs->num_request_queues = min_t(unsigned int, nr_cpu_ids,
> + fs->num_request_queues);
> +
> fs->nvqs = VQ_REQUEST + fs->num_request_queues;
> fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
> if (!fs->vqs)
> return -ENOMEM;
>
> + pr_debug("virtio-fs: number of vqs: %d\n", fs->nvqs);
> +
> vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL);
> callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]),
> GFP_KERNEL);
> @@ -723,12 +731,26 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
> names[i] = fs->vqs[i].name;
> }
>
> - ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
> + ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, &desc);
> if (ret < 0)
> goto out;
>
> - for (i = 0; i < fs->nvqs; i++)
> + fs->vq_proxy = alloc_percpu(struct virtio_fs_vq *);
> + for (i = 0; i < fs->nvqs; i++) {
> + const struct cpumask *mask;
> + unsigned int cpu;
> +
> fs->vqs[i].vq = vqs[i];
> + if (i == VQ_HIPRIO)
> + continue;
> +
> + mask = vdev->config->get_vq_affinity(vdev, i);
> + for_each_cpu(cpu, mask) {
> + struct virtio_fs_vq **cpu_vq = per_cpu_ptr(fs->vq_proxy, cpu);
> + *cpu_vq = &fs->vqs[i];
> + pr_debug("virtio-fs: map cpu %d to vq%d\n", cpu, i);
> + }
> + }
>
> virtio_fs_start_all_queues(fs);
> out:
> @@ -875,8 +897,6 @@ static int virtio_fs_probe(struct virtio_device *vdev)
> if (ret < 0)
> goto out;
>
> - /* TODO vq affinity */
> -
> ret = virtio_fs_setup_dax(vdev, fs);
> if (ret < 0)
> goto out_vqs;
> @@ -926,6 +946,7 @@ static void virtio_fs_remove(struct virtio_device *vdev)
> virtio_fs_stop_all_queues(fs);
> virtio_fs_drain_all_queues_locked(fs);
> virtio_reset_device(vdev);
> + free_percpu(fs->vq_proxy);
> virtio_fs_cleanup_vqs(vdev);
>
> vdev->priv = NULL;
> @@ -1223,7 +1244,6 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
> static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
> __releases(fiq->lock)
> {
> - unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
> struct virtio_fs *fs;
> struct fuse_req *req;
> struct virtio_fs_vq *fsvq;
> @@ -1243,7 +1263,8 @@ __releases(fiq->lock)
> req->in.h.nodeid, req->in.h.len,
> fuse_len_args(req->args->out_numargs, req->args->out_args));
>
> - fsvq = &fs->vqs[queue_id];
> + fsvq = this_cpu_read(*fs->vq_proxy);
> +
> ret = virtio_fs_enqueue_req(fsvq, req, false);
> if (ret < 0) {
> if (ret == -ENOMEM || ret == -ENOSPC) {
> --
> 2.20.1
>