lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20250529053513.1592088-7-yilun.xu@linux.intel.com>
Date: Thu, 29 May 2025 13:34:49 +0800
From: Xu Yilun <yilun.xu@...ux.intel.com>
To: kvm@...r.kernel.org,
	sumit.semwal@...aro.org,
	christian.koenig@....com,
	pbonzini@...hat.com,
	seanjc@...gle.com,
	alex.williamson@...hat.com,
	jgg@...dia.com,
	dan.j.williams@...el.com,
	aik@....com,
	linux-coco@...ts.linux.dev
Cc: dri-devel@...ts.freedesktop.org,
	linux-media@...r.kernel.org,
	linaro-mm-sig@...ts.linaro.org,
	vivek.kasireddy@...el.com,
	yilun.xu@...el.com,
	yilun.xu@...ux.intel.com,
	linux-kernel@...r.kernel.org,
	lukas@...ner.de,
	yan.y.zhao@...el.com,
	daniel.vetter@...ll.ch,
	leon@...nel.org,
	baolu.lu@...ux.intel.com,
	zhenzhong.duan@...el.com,
	tao1.su@...el.com,
	linux-pci@...r.kernel.org,
	zhiw@...dia.com,
	simona.vetter@...ll.ch,
	shameerali.kolothum.thodi@...wei.com,
	aneesh.kumar@...nel.org,
	iommu@...ts.linux.dev,
	kevin.tian@...el.com
Subject: [RFC PATCH 06/30] HACK: vfio/pci: Support get_pfn() callback for dma-buf

This is to support private device/MMIO assignment, but is an
incomplete implementation as discussed. In this case, VFIO PCI acts as
the exporter for MMIO regions and KVM is the importer. KVM imports the
dma-buf FD and gets MMIO pfn through dma_buf_ops.get_pfn(), then map
the pfn in KVM MMU. KVM should also react to dma-buf move notify, unmapping
all pfns when VFIO revokes the MMIOs. I.e., VFIO controls the lifetime of
the MMIOs.

Previously, KVM used follow_pfn() to get the MMIO pfn. With dma-buf,
KVM no longer needs to first map the MMIOs to the host page table. It
also solves the concern in Confidential Computing (CC) that host is not
allowed to have mapping to private resources owned by guest.

Signed-off-by: Xu Yilun <yilun.xu@...ux.intel.com>
---
 drivers/vfio/pci/vfio_pci_dmabuf.c | 34 ++++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)

diff --git a/drivers/vfio/pci/vfio_pci_dmabuf.c b/drivers/vfio/pci/vfio_pci_dmabuf.c
index a4c313ca5bda..cf9a90448856 100644
--- a/drivers/vfio/pci/vfio_pci_dmabuf.c
+++ b/drivers/vfio/pci/vfio_pci_dmabuf.c
@@ -174,6 +174,39 @@ static void vfio_pci_dma_buf_unmap(struct dma_buf_attachment *attachment,
 	kfree(sgt);
 }
 
+/*
+ * dma_buf_ops.get_pfn() callback: translate a page offset within the
+ * dma-buf into the host pfn of the backing PCI BAR MMIO, so the importer
+ * (KVM) can map the pfn directly without a host page-table mapping.
+ *
+ * Must be called with the dma-buf's reservation lock held
+ * (dma_resv_assert_held below). Returns -ENODEV once VFIO has revoked
+ * the MMIOs, -EINVAL when @pgoff lies beyond the dma-buf size.
+ */
+static int vfio_pci_dma_buf_get_pfn(struct dma_buf_attachment *attachment,
+				    pgoff_t pgoff, u64 *pfn, int *max_order)
+{
+	struct vfio_pci_dma_buf *priv = attachment->dmabuf->priv;
+	struct vfio_region_dma_range *dma_ranges = priv->dma_ranges;
+	u64 offset = pgoff << PAGE_SHIFT;
+	int i;
+
+	dma_resv_assert_held(priv->dmabuf->resv);
+
+	if (priv->revoked)
+		return -ENODEV;
+
+	if (offset >= priv->dmabuf->size)
+		return -EINVAL;
+
+	/*
+	 * Walk the ranges to find the one containing @offset. The size
+	 * check above is the only loop bound keeping @i within
+	 * @nr_ranges — this presumes dmabuf->size equals the sum of all
+	 * range lengths; NOTE(review): confirm check_dma_ranges()
+	 * enforces that invariant.
+	 */
+	for (i = 0; i < priv->nr_ranges; i++) {
+		if (offset < dma_ranges[i].length)
+			break;
+
+		offset -= dma_ranges[i].length;
+	}
+
+	/* pfn = BAR start + range offset + residual offset within range */
+	*pfn = PHYS_PFN(pci_resource_start(priv->vdev->pdev, dma_ranges[i].region_index) +
+			dma_ranges[i].offset + offset);
+
+	/* TODO: large page mapping is yet to be supported */
+	if (max_order)
+		*max_order = 0;
+
+	return 0;
+}
+
 static void vfio_pci_dma_buf_release(struct dma_buf *dmabuf)
 {
 	struct vfio_pci_dma_buf *priv = dmabuf->priv;
@@ -198,6 +231,7 @@ static const struct dma_buf_ops vfio_pci_dmabuf_ops = {
 	.unpin = vfio_pci_dma_buf_unpin,
 	.release = vfio_pci_dma_buf_release,
 	.unmap_dma_buf = vfio_pci_dma_buf_unmap,
+	.get_pfn = vfio_pci_dma_buf_get_pfn,
 };
 
 static int check_dma_ranges(struct vfio_pci_dma_buf *priv,
-- 
2.25.1


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ