[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250904040828.319452-11-ankita@nvidia.com>
Date: Thu, 4 Sep 2025 04:08:24 +0000
From: <ankita@...dia.com>
To: <ankita@...dia.com>, <jgg@...dia.com>, <alex.williamson@...hat.com>,
<yishaih@...dia.com>, <skolothumtho@...dia.com>, <kevin.tian@...el.com>,
<yi.l.liu@...el.com>, <zhiw@...dia.com>
CC: <aniketa@...dia.com>, <cjia@...dia.com>, <kwankhede@...dia.com>,
<targupta@...dia.com>, <vsethi@...dia.com>, <acurrid@...dia.com>,
<apopple@...dia.com>, <jhubbard@...dia.com>, <danw@...dia.com>,
<anuaggarwal@...dia.com>, <mochs@...dia.com>, <kjaju@...dia.com>,
<dnigam@...dia.com>, <kvm@...r.kernel.org>, <linux-kernel@...r.kernel.org>
Subject: [RFC 10/14] vfio/nvgrace-egm: Clear Memory before handing out to VM
From: Ankit Agrawal <ankita@...dia.com>
The EGM region is invisible to the host Linux kernel, which does not
manage it. The EGM module manages the EGM memory and is therefore
responsible for clearing the region before handing it out to the VM.
Clear the EGM region on EGM chardev open. Tools such as kvmtool may
trigger open multiple times, so ensure the region is cleared only on
the first open.
Suggested-by: Vikram Sethi <vsethi@...dia.com>
Signed-off-by: Ankit Agrawal <ankita@...dia.com>
---
drivers/vfio/pci/nvgrace-gpu/egm.c | 28 +++++++++++++++++++++++++++-
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/drivers/vfio/pci/nvgrace-gpu/egm.c b/drivers/vfio/pci/nvgrace-gpu/egm.c
index 7bf6a05aa967..bf1241ed1d60 100644
--- a/drivers/vfio/pci/nvgrace-gpu/egm.c
+++ b/drivers/vfio/pci/nvgrace-gpu/egm.c
@@ -15,6 +15,7 @@ static DEFINE_XARRAY(egm_chardevs);
struct chardev {
struct device device;
struct cdev cdev;
+ atomic_t open_count;
};
static struct nvgrace_egm_dev *
@@ -30,6 +31,26 @@ static int nvgrace_egm_open(struct inode *inode, struct file *file)
{
struct chardev *egm_chardev =
container_of(inode->i_cdev, struct chardev, cdev);
+ struct nvgrace_egm_dev *egm_dev =
+ egm_chardev_to_nvgrace_egm_dev(egm_chardev);
+ void *memaddr;
+
+ if (atomic_inc_return(&egm_chardev->open_count) > 1)
+ return 0;
+
+ /*
+ * nvgrace-egm module is responsible to manage the EGM memory as
+ * the host kernel has no knowledge of it. Clear the region before
+ * handing over to userspace.
+ */
+ memaddr = memremap(egm_dev->egmphys, egm_dev->egmlength, MEMREMAP_WB);
+ if (!memaddr) {
+ atomic_dec(&egm_chardev->open_count);
+ return -EINVAL;
+ }
+
+ memset((u8 *)memaddr, 0, egm_dev->egmlength);
+ memunmap(memaddr);
file->private_data = egm_chardev;
@@ -38,7 +59,11 @@ static int nvgrace_egm_open(struct inode *inode, struct file *file)
static int nvgrace_egm_release(struct inode *inode, struct file *file)
{
- file->private_data = NULL;
+ struct chardev *egm_chardev =
+ container_of(inode->i_cdev, struct chardev, cdev);
+
+ if (atomic_dec_and_test(&egm_chardev->open_count))
+ file->private_data = NULL;
return 0;
}
@@ -96,6 +121,7 @@ setup_egm_chardev(struct nvgrace_egm_dev *egm_dev)
egm_chardev->device.parent = &egm_dev->aux_dev.dev;
cdev_init(&egm_chardev->cdev, &file_ops);
egm_chardev->cdev.owner = THIS_MODULE;
+ atomic_set(&egm_chardev->open_count, 0);
ret = dev_set_name(&egm_chardev->device, "egm%lld", egm_dev->egmpxm);
if (ret)
--
2.34.1
Powered by blists - more mailing lists