Message-ID: <CAPcyv4inCFFXmg0r5+h0O6cADpt9HdboVDEL00XX-wGroy-7LQ@mail.gmail.com>
Date: Wed, 25 Aug 2021 10:25:40 -0700
From: Dan Williams <dan.j.williams@...el.com>
To: Pankaj Gupta <pankaj.gupta.linux@...il.com>
Cc: Linux NVDIMM <nvdimm@...ts.linux.dev>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
jmoyer <jmoyer@...hat.com>, David Hildenbrand <david@...hat.com>,
"Michael S. Tsirkin" <mst@...hat.com>,
Cornelia Huck <cohuck@...hat.com>,
Vishal L Verma <vishal.l.verma@...el.com>,
Dave Jiang <dave.jiang@...el.com>,
"Weiny, Ira" <ira.weiny@...el.com>,
Pankaj Gupta <pankaj.gupta@...os.com>
Subject: Re: [RFC v2 1/2] virtio-pmem: Async virtio-pmem flush
On Sun, Jul 25, 2021 at 11:09 PM Pankaj Gupta
<pankaj.gupta.linux@...il.com> wrote:
>
> From: Pankaj Gupta <pankaj.gupta@...os.com>
>
> Implement asynchronous flush for virtio pmem using work queue
> to solve the preflush ordering issue. Also, coalesce the flush
> requests when a flush is already in process.
>
> Signed-off-by: Pankaj Gupta <pankaj.gupta@...os.com>
> ---
> drivers/nvdimm/nd_virtio.c | 72 ++++++++++++++++++++++++++++--------
> drivers/nvdimm/virtio_pmem.c | 10 ++++-
> drivers/nvdimm/virtio_pmem.h | 14 +++++++
> 3 files changed, 79 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
> index 10351d5b49fa..61b655b583be 100644
> --- a/drivers/nvdimm/nd_virtio.c
> +++ b/drivers/nvdimm/nd_virtio.c
> @@ -97,29 +97,69 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
> return err;
> };
>
> +static void submit_async_flush(struct work_struct *ws);
> +
> /* The asynchronous flush callback function */
> int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
> {
> - /*
> - * Create child bio for asynchronous flush and chain with
> - * parent bio. Otherwise directly call nd_region flush.
> + /* queue asynchronous flush and coalesce the flush requests */
> + struct virtio_device *vdev = nd_region->provider_data;
> + struct virtio_pmem *vpmem = vdev->priv;
> + ktime_t req_start = ktime_get_boottime();
> +
> + spin_lock_irq(&vpmem->lock);
> + /* flush requests wait until ongoing flush completes,
> + * hence coalescing all the pending requests.
> */
> - if (bio && bio->bi_iter.bi_sector != -1) {
> - struct bio *child = bio_alloc(GFP_ATOMIC, 0);
> -
> - if (!child)
> - return -ENOMEM;
> - bio_copy_dev(child, bio);
> - child->bi_opf = REQ_PREFLUSH;
> - child->bi_iter.bi_sector = -1;
> - bio_chain(child, bio);
> - submit_bio(child);
> - return 0;
> + wait_event_lock_irq(vpmem->sb_wait,
> + !vpmem->flush_bio ||
> + ktime_before(req_start, vpmem->prev_flush_start),
> + vpmem->lock);
> + /* new request after previous flush is completed */
> + if (ktime_after(req_start, vpmem->prev_flush_start)) {
> + WARN_ON(vpmem->flush_bio);
> + vpmem->flush_bio = bio;
> + bio = NULL;
> + }
Why the dance with ->prev_flush_start vs just calling queue_work()
again? queue_work() is naturally coalescing: if the last work request
has not started executing, another queue attempt will be dropped.
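
Untested sketch of what I mean, just to illustrate; the "flush_bios"
bio_list is hypothetical (not in this patch), and the !bio / error
paths are omitted:

int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
{
	struct virtio_device *vdev = nd_region->provider_data;
	struct virtio_pmem *vpmem = vdev->priv;

	/* park the request; the work handler completes it later */
	spin_lock_irq(&vpmem->lock);
	bio_list_add(&vpmem->flush_bios, bio);
	spin_unlock_irq(&vpmem->lock);

	/*
	 * A no-op if flush_work is already queued and has not started
	 * running, so back-to-back callers coalesce onto one flush.
	 */
	queue_work(vpmem->pmem_wq, &vpmem->flush_work);
	return 1;
}
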
> + spin_unlock_irq(&vpmem->lock);
> +
> + if (!bio) {
> + INIT_WORK(&vpmem->flush_work, submit_async_flush);
I expect this only needs to be initialized once at driver init time.
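For example (sketch), do it once next to the rest of the setup in the
probe / init path and drop it from the I/O path:

	INIT_WORK(&vpmem->flush_work, submit_async_flush);
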
> + queue_work(vpmem->pmem_wq, &vpmem->flush_work);
> + return 1;
> + }
> +
> + /* flush completed in other context while we waited */
> + if (bio && (bio->bi_opf & REQ_PREFLUSH)) {
> + bio->bi_opf &= ~REQ_PREFLUSH;
> + submit_bio(bio);
> + } else if (bio && (bio->bi_opf & REQ_FUA)) {
> + bio->bi_opf &= ~REQ_FUA;
> + bio_endio(bio);
It's not clear to me how this happens; shouldn't all flush completions
be driven from the work completion?
> }
> - if (virtio_pmem_flush(nd_region))
> - return -EIO;
>
> return 0;
> };
> EXPORT_SYMBOL_GPL(async_pmem_flush);
> +
> +static void submit_async_flush(struct work_struct *ws)
> +{
> + struct virtio_pmem *vpmem = container_of(ws, struct virtio_pmem, flush_work);
> + struct bio *bio = vpmem->flush_bio;
> +
> + vpmem->start_flush = ktime_get_boottime();
> + bio->bi_status = errno_to_blk_status(virtio_pmem_flush(vpmem->nd_region));
> + vpmem->prev_flush_start = vpmem->start_flush;
> + vpmem->flush_bio = NULL;
> + wake_up(&vpmem->sb_wait);
> +
> + /* Submit parent bio only for PREFLUSH */
> + if (bio && (bio->bi_opf & REQ_PREFLUSH)) {
> + bio->bi_opf &= ~REQ_PREFLUSH;
> + submit_bio(bio);
> + } else if (bio && (bio->bi_opf & REQ_FUA)) {
> + bio->bi_opf &= ~REQ_FUA;
> + bio_endio(bio);
> + }
Shouldn't the wait_event_lock_irq() be here rather than in
async_pmem_flush()? That will cause the workqueue to back up and flush
requests to coalesce.
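
For illustration, continuing the untested "flush_bios" sketch from
above: with pending requests parked on a list, the single work item is
what backs up behind virtio_pmem_flush(), everything queued in the
meantime is completed on the next pass, and the per-caller wait and
prev_flush_start bookkeeping largely disappear:

static void submit_async_flush(struct work_struct *ws)
{
	struct virtio_pmem *vpmem = container_of(ws, struct virtio_pmem,
						 flush_work);
	struct bio_list bios;
	struct bio *bio;
	blk_status_t status;

	/* steal everything that queued up behind the previous flush */
	spin_lock_irq(&vpmem->lock);
	bios = vpmem->flush_bios;
	bio_list_init(&vpmem->flush_bios);
	spin_unlock_irq(&vpmem->lock);

	/* a later queue_work() may find the list already drained */
	if (bio_list_empty(&bios))
		return;

	/* one device flush covers all the coalesced requests */
	status = errno_to_blk_status(virtio_pmem_flush(vpmem->nd_region));

	while ((bio = bio_list_pop(&bios))) {
		if (status) {
			bio->bi_status = status;
			bio_endio(bio);
		} else if (bio->bi_opf & REQ_PREFLUSH) {
			/* preflush done, now submit the data portion */
			bio->bi_opf &= ~REQ_PREFLUSH;
			submit_bio(bio);
		} else {
			bio_endio(bio);
		}
	}
}
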
> +}
> MODULE_LICENSE("GPL");
> diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
> index 726c7354d465..56780a6140c7 100644
> --- a/drivers/nvdimm/virtio_pmem.c
> +++ b/drivers/nvdimm/virtio_pmem.c
> @@ -24,6 +24,7 @@ static int init_vq(struct virtio_pmem *vpmem)
> return PTR_ERR(vpmem->req_vq);
>
> spin_lock_init(&vpmem->pmem_lock);
> + spin_lock_init(&vpmem->lock);
Why 2 locks?