Message-ID: <20260113034552.62805-1-me@linux.beauty>
Date: Tue, 13 Jan 2026 11:45:52 +0800
From: Li Chen <me@...ux.beauty>
To: Dan Williams <dan.j.williams@...el.com>,
Vishal Verma <vishal.l.verma@...el.com>,
Dave Jiang <dave.jiang@...el.com>,
Ira Weiny <ira.weiny@...el.com>,
Pankaj Gupta <pankaj.gupta.linux@...il.com>,
"Michael S. Tsirkin" <mst@...hat.com>,
Cornelia Huck <cohuck@...hat.com>,
Jakub Staron <jstaron@...gle.com>,
nvdimm@...ts.linux.dev,
virtualization@...ts.linux.dev,
linux-kernel@...r.kernel.org
Cc: Li Chen <me@...ux.beauty>
Subject: [PATCH] nvdimm: virtio_pmem: serialize flush requests

Under heavy concurrent flush traffic, virtio-pmem can overflow its request
virtqueue (req_vq): virtqueue_add_sgs() starts returning -ENOSPC and the
driver logs "no free slots in the virtqueue". Shortly after that the
device enters VIRTIO_CONFIG_S_NEEDS_RESET and flush requests fail with
"virtio pmem device needs a reset".

Serialize virtio_pmem_flush() with a per-device mutex so that only one
flush request is in flight at a time. This prevents req_vq descriptor
exhaustion under high concurrency.
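
With the change, the per-flush flow becomes (a sketch; the spinlock
section is existing code that the hunks below do not touch):

    mutex_lock(&vpmem->flush_lock);     /* new: one flush in flight */
    req_data = kmalloc(..., GFP_KERNEL);
    spin_lock_irqsave(&vpmem->pmem_lock, flags);
    virtqueue_add_sgs(...);             /* at most one request queued */
    virtqueue_kick(vpmem->req_vq);
    spin_unlock_irqrestore(&vpmem->pmem_lock, flags);
    wait_event(req_data->host_acked, req_data->done);
    mutex_unlock(&vpmem->flush_lock);   /* new */

The mutex is taken strictly outside the existing pmem_lock spinlock, so no
new lock-ordering constraint is introduced, and virtio_pmem_flush() already
runs in sleepable context (might_sleep()), so a mutex is safe here.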

Reproducer (guest with virtio-pmem):
- mkfs.ext4 -F /dev/pmem0
- mount -t ext4 -o dax,noatime /dev/pmem0 /mnt/bench
- fio: ioengine=io_uring rw=randwrite bs=4k iodepth=64 numjobs=64
  direct=1 fsync=1 runtime=30s time_based=1 (expanded into a full
  command line below)
- dmesg: "no free slots in the virtqueue"
         "virtio pmem device needs a reset"

Fixes: 6e84200c0a29 ("virtio-pmem: Add virtio pmem driver")
Signed-off-by: Li Chen <me@...ux.beauty>
---
 drivers/nvdimm/nd_virtio.c   | 15 +++++++++++----
 drivers/nvdimm/virtio_pmem.c |  1 +
 drivers/nvdimm/virtio_pmem.h |  4 ++++
 3 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
index c3f07be4aa22..827a17fe7c71 100644
--- a/drivers/nvdimm/nd_virtio.c
+++ b/drivers/nvdimm/nd_virtio.c
@@ -44,19 +44,24 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
         unsigned long flags;
         int err, err1;
 
+        might_sleep();
+        mutex_lock(&vpmem->flush_lock);
+
         /*
          * Don't bother to submit the request to the device if the device is
          * not activated.
          */
         if (vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_NEEDS_RESET) {
                 dev_info(&vdev->dev, "virtio pmem device needs a reset\n");
-                return -EIO;
+                err = -EIO;
+                goto out_unlock;
         }
 
-        might_sleep();
         req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
-        if (!req_data)
-                return -ENOMEM;
+        if (!req_data) {
+                err = -ENOMEM;
+                goto out_unlock;
+        }
 
         req_data->done = false;
         init_waitqueue_head(&req_data->host_acked);
@@ -103,6 +108,8 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
         }
 
         kfree(req_data);
+out_unlock:
+        mutex_unlock(&vpmem->flush_lock);
         return err;
 };
 
diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
index 2396d19ce549..77b196661905 100644
--- a/drivers/nvdimm/virtio_pmem.c
+++ b/drivers/nvdimm/virtio_pmem.c
@@ -64,6 +64,7 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
                 goto out_err;
         }
 
+        mutex_init(&vpmem->flush_lock);
         vpmem->vdev = vdev;
         vdev->priv = vpmem;
         err = init_vq(vpmem);
diff --git a/drivers/nvdimm/virtio_pmem.h b/drivers/nvdimm/virtio_pmem.h
index 0dddefe594c4..f72cf17f9518 100644
--- a/drivers/nvdimm/virtio_pmem.h
+++ b/drivers/nvdimm/virtio_pmem.h
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <uapi/linux/virtio_pmem.h>
 #include <linux/libnvdimm.h>
+#include <linux/mutex.h>
 #include <linux/spinlock.h>
 
 struct virtio_pmem_request {
@@ -35,6 +36,9 @@ struct virtio_pmem {
         /* Virtio pmem request queue */
         struct virtqueue *req_vq;
 
+        /* Serialize flush requests to the device. */
+        struct mutex flush_lock;
+
         /* nvdimm bus registers virtio pmem device */
         struct nvdimm_bus *nvdimm_bus;
         struct nvdimm_bus_descriptor nd_desc;
--
2.52.0