Message-ID: <20190612083711.2c0cfd7e.cohuck@redhat.com>
Date: Wed, 12 Jun 2019 08:37:11 +0200
From: Cornelia Huck <cohuck@...hat.com>
To: Pankaj Gupta <pagupta@...hat.com>
Cc: dm-devel@...hat.com, linux-nvdimm@...ts.01.org,
linux-kernel@...r.kernel.org,
virtualization@...ts.linux-foundation.org, kvm@...r.kernel.org,
linux-fsdevel@...r.kernel.org, linux-acpi@...r.kernel.org,
qemu-devel@...gnu.org, linux-ext4@...r.kernel.org,
linux-xfs@...r.kernel.org,
dan j williams <dan.j.williams@...el.com>,
zwisler@...nel.org, vishal l verma <vishal.l.verma@...el.com>,
dave jiang <dave.jiang@...el.com>, mst@...hat.com,
jasowang@...hat.com, willy@...radead.org, rjw@...ysocki.net,
hch@...radead.org, lenb@...nel.org, jack@...e.cz, tytso@....edu,
adilger kernel <adilger.kernel@...ger.ca>,
darrick wong <darrick.wong@...cle.com>, lcapitulino@...hat.com,
kwolf@...hat.com, imammedo@...hat.com, jmoyer@...hat.com,
nilal@...hat.com, riel@...riel.com, stefanha@...hat.com,
aarcange@...hat.com, david@...hat.com, david@...morbit.com,
xiaoguangrong eric <xiaoguangrong.eric@...il.com>,
pbonzini@...hat.com, yuval shaia <yuval.shaia@...cle.com>,
kilobyte@...band.pl, jstaron@...gle.com, rdunlap@...radead.org,
snitzer@...hat.com
Subject: Re: [PATCH v12 2/7] virtio-pmem: Add virtio pmem driver
Hi Pankaj,
On Tue, 11 Jun 2019 23:34:50 -0400 (EDT)
Pankaj Gupta <pagupta@...hat.com> wrote:
> Hi Cornelia,
>
> > On Tue, 11 Jun 2019 22:07:57 +0530
> > Pankaj Gupta <pagupta@...hat.com> wrote:
> > > +	err1 = virtqueue_kick(vpmem->req_vq);
> > > +	spin_unlock_irqrestore(&vpmem->pmem_lock, flags);
> > > +	/*
> > > +	 * virtqueue_add_sgs failed with error different than -ENOSPC, we can't
> > > +	 * do anything about that.
> > > +	 */
> >
> > Does it make sense to kick if you couldn't add at all?
>
> When we cannot add the buffer because of -ENOSPC, we wait, and we only kick once
> the buffer has actually been added. For any other error, which should be a rare
> occurrence, I think the kick is harmless here and keeps the code clean?
Yes, I agree it does not hurt. Let's keep it as-is.
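(For anyone skimming the thread, the submission path under discussion
looks roughly like the following. This is only a simplified sketch: the
allocation of req_data and the sg list setup are omitted, and the field
names are taken from the patch as I read it:

	spin_lock_irqsave(&vpmem->pmem_lock, flags);
	while ((err = virtqueue_add_sgs(vpmem->req_vq, sgs, 1, 1, req_data,
					GFP_ATOMIC)) == -ENOSPC) {
		/*
		 * No free descriptors: park the request on req_list and
		 * sleep until the interrupt handler frees a slot and
		 * wakes us up.
		 */
		req_data->wq_buf_avail = false;
		list_add_tail(&req_data->list, &vpmem->req_list);
		spin_unlock_irqrestore(&vpmem->pmem_lock, flags);
		wait_event(req_data->wq_buf, req_data->wq_buf_avail);
		spin_lock_irqsave(&vpmem->pmem_lock, flags);
	}
	/*
	 * Kick unconditionally; if virtqueue_add_sgs failed with something
	 * other than -ENOSPC, the kick is simply harmless.
	 */
	err1 = virtqueue_kick(vpmem->req_vq);
	spin_unlock_irqrestore(&vpmem->pmem_lock, flags);

So the only way to leave the loop with an error is an error other than
-ENOSPC, and the extra kick in that case does not hurt.)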
> Sure, thank you. Attached below are the on-top changes to the current patch 2, based on
> your suggestions. Let me know if these are okay, and I will then send the official
> v13 for upstream merging.
Looks good to me, except for one change.
[Again, sorry for the late review; I did not want to drive the version
number up any further :)]
>
> Thanks,
> Pankaj
>
> ===============
>
> diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
> index efc535723517..5b8d2367da0b 100644
> --- a/drivers/nvdimm/nd_virtio.c
> +++ b/drivers/nvdimm/nd_virtio.c
> @@ -10,7 +10,7 @@
>  #include "nd.h"
>
>  /* The interrupt handler */
> -void host_ack(struct virtqueue *vq)
> +void virtio_pmem_host_ack(struct virtqueue *vq)
>  {
>  	struct virtio_pmem *vpmem = vq->vdev->priv;
>  	struct virtio_pmem_request *req_data, *req_buf;
> @@ -32,10 +32,10 @@ void host_ack(struct virtqueue *vq)
>  	}
>  	spin_unlock_irqrestore(&vpmem->pmem_lock, flags);
>  }
> -EXPORT_SYMBOL_GPL(host_ack);
> +EXPORT_SYMBOL_GPL(virtio_pmem_host_ack);
>
>  /* The request submission function */
> -int virtio_pmem_flush(struct nd_region *nd_region)
> +static int virtio_pmem_flush(struct nd_region *nd_region)
>  {
>  	struct virtio_device *vdev = nd_region->provider_data;
>  	struct virtio_pmem *vpmem = vdev->priv;
> @@ -69,7 +69,7 @@ int virtio_pmem_flush(struct nd_region *nd_region)
>  	while ((err = virtqueue_add_sgs(vpmem->req_vq, sgs, 1, 1, req_data,
>  					GFP_ATOMIC)) == -ENOSPC) {
>
> -		dev_err(&vdev->dev, "failed to send command to virtio pmem device, no free slots in the virtqueue\n");
> +		dev_info(&vdev->dev, "failed to send command to virtio pmem device, no free slots in the virtqueue\n");
>  		req_data->wq_buf_avail = false;
>  		list_add_tail(&req_data->list, &vpmem->req_list);
>  		spin_unlock_irqrestore(&vpmem->pmem_lock, flags);
> @@ -90,7 +90,8 @@ int virtio_pmem_flush(struct nd_region *nd_region)
>  	} else {
>  		/* A host repsonse results in "host_ack" getting called */
>  		wait_event(req_data->host_acked, req_data->done);
> -		err = virtio32_to_cpu(vdev, req_data->resp.ret);
> +		if ((err = virtio32_to_cpu(vdev, req_data->resp.ret)))
> +			err = -EIO;
Hm, why are you making this change? I think the previous code was fine.
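(With the version I reviewed, the function simply propagated the host's
value, i.e.

	err = virtio32_to_cpu(vdev, req_data->resp.ret);

whereas the new version collapses every non-zero return into -EIO.)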
>  	}
>
>  	kfree(req_data);
> @@ -100,7 +101,8 @@ int virtio_pmem_flush(struct nd_region *nd_region)
>  /* The asynchronous flush callback function */
>  int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
>  {
> -	/* Create child bio for asynchronous flush and chain with
> +	/*
> +	 * Create child bio for asynchronous flush and chain with
>  	 * parent bio. Otherwise directly call nd_region flush.
>  	 */
>  	if (bio && bio->bi_iter.bi_sector != -1) {
> diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
> index b60ebd8cd2fd..5e3d07b47e0c 100644
> --- a/drivers/nvdimm/virtio_pmem.c
> +++ b/drivers/nvdimm/virtio_pmem.c
> @@ -19,7 +19,7 @@ static int init_vq(struct virtio_pmem *vpmem)
>  {
>  	/* single vq */
>  	vpmem->req_vq = virtio_find_single_vq(vpmem->vdev,
> -					host_ack, "flush_queue");
> +					virtio_pmem_host_ack, "flush_queue");
>  	if (IS_ERR(vpmem->req_vq))
>  		return PTR_ERR(vpmem->req_vq);
>
> diff --git a/drivers/nvdimm/virtio_pmem.h b/drivers/nvdimm/virtio_pmem.h
> index 6e47521be158..998efbc7660c 100644
> --- a/drivers/nvdimm/virtio_pmem.h
> +++ b/drivers/nvdimm/virtio_pmem.h
> @@ -50,6 +50,6 @@ struct virtio_pmem {
>  	uint64_t size;
>  };
>
> -void host_ack(struct virtqueue *vq);
> +void virtio_pmem_host_ack(struct virtqueue *vq);
>  int async_pmem_flush(struct nd_region *nd_region, struct bio *bio);
>  #endif