Message-ID: <20230621235508.113949-20-suravee.suthikulpanit@amd.com>
Date:   Wed, 21 Jun 2023 18:55:06 -0500
From:   Suravee Suthikulpanit <suravee.suthikulpanit@....com>
To:     <linux-kernel@...r.kernel.org>, <iommu@...ts.linux.dev>,
        <kvm@...r.kernel.org>
CC:     <joro@...tes.org>, <robin.murphy@....com>, <yi.l.liu@...el.com>,
        <alex.williamson@...hat.com>, <jgg@...dia.com>,
        <nicolinc@...dia.com>, <baolu.lu@...ux.intel.com>,
        <eric.auger@...hat.com>, <pandoh@...gle.com>,
        <kumaranand@...gle.com>, <jon.grimm@....com>,
        <santosh.shukla@....com>, <vasant.hegde@....com>,
        <jay.chen@....com>, <joseph.chung@....com>,
        "Suravee Suthikulpanit" <suravee.suthikulpanit@....com>
Subject: [RFC PATCH 19/21] iommu/amd: Introduce vIOMMU ioctl for handling command buffer mapping

Introduce an ioctl interface for handling vIOMMU guest command buffer
mapping. amd_viommu_cmdbuf_update() pins the pages backing the guest
command buffer (described by a host virtual address and size), and maps
them into the IOMMU Private Address (IPA) space at a fixed per-guest
offset (VIOMMU_GUEST_CMDBUF_BASE + gid * CMD_BUFFER_MAXSIZE), so that
the IOMMU hardware can fetch commands directly from guest memory. For
example, pages for the guest with GID 2 are mapped starting at
VIOMMU_GUEST_CMDBUF_BASE + 0x100000. On failure, any pages already
mapped and pinned are unmapped, unpinned, and freed.
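
For illustration, a VMM could describe a guest command buffer to the
driver along the following lines. This is a minimal sketch: the ioctl
request code and the file descriptor used to reach the driver are not
defined in this patch, so AMD_VIOMMU_CMDBUF_UPDATE and viommu_fd are
placeholders; only the amd_viommu_cmdbuf_data fields (iommu_id, gid,
hva, cmdbuf_size) come from this series:

	#include <stdint.h>
	#include <sys/ioctl.h>

	static int viommu_map_cmdbuf(int viommu_fd, uint32_t guest_id,
				     void *cmdbuf, uint64_t size)
	{
		struct amd_viommu_cmdbuf_data data = {
			.iommu_id    = 0,        /* host IOMMU instance */
			.gid         = guest_id, /* guest ID from vIOMMU init */
			/* page-aligned buffer in the VMM's address space */
			.hva         = (uint64_t)(uintptr_t)cmdbuf,
			/* multiple of PAGE_SIZE, at most CMD_BUFFER_MAXSIZE */
			.cmdbuf_size = size,
		};

		/* AMD_VIOMMU_CMDBUF_UPDATE is a placeholder request code */
		return ioctl(viommu_fd, AMD_VIOMMU_CMDBUF_UPDATE, &data);
	}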

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@....com>
---
 drivers/iommu/amd/amd_iommu_types.h |  1 +
 drivers/iommu/amd/viommu.c          | 86 ++++++++++++++++++++++++++++++
 2 files changed, 87 insertions(+)

diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 5cb5a709b31b..dd3c79e454d8 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -261,6 +261,7 @@
 #define CMD_BUFFER_SIZE    8192
 #define CMD_BUFFER_UNINITIALIZED 1
 #define CMD_BUFFER_ENTRIES 512
+#define CMD_BUFFER_MAXSIZE 0x80000
 #define MMIO_CMD_SIZE_SHIFT 56
 #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
 
diff --git a/drivers/iommu/amd/viommu.c b/drivers/iommu/amd/viommu.c
index 9ddbdbec4a75..1bd4282384c4 100644
--- a/drivers/iommu/amd/viommu.c
+++ b/drivers/iommu/amd/viommu.c
@@ -994,3 +994,89 @@ int amd_viommu_guest_mmio_write(struct amd_viommu_mmio_data *data)
 	return 0;
 }
 EXPORT_SYMBOL(amd_viommu_guest_mmio_write);
+
+static void viommu_cmdbuf_free(struct protection_domain *dom, struct io_pgtable_ops *ops,
+				   unsigned long iova, struct page **pages, unsigned long npages)
+{
+	int i;
+	unsigned long flags;
+	unsigned long tmp = iova;
+
+	spin_lock_irqsave(&dom->lock, flags);
+	for (i = 0; i < npages; i++, tmp += PAGE_SIZE) {
+		amd_iommu_v1_unmap_pages(ops, tmp, PAGE_SIZE, 1, NULL);
+		/*
+		 * Flush the domain TLB(s) and wait for completion so the
+		 * IOMMU no longer holds translations for the unmapped
+		 * range before the page is unpinned.
+		 */
+		amd_iommu_domain_flush_tlb_pde(dom);
+		amd_iommu_domain_flush_complete(dom);
+
+		unpin_user_pages(&pages[i], 1);
+	}
+	spin_unlock_irqrestore(&dom->lock, flags);
+}
+
+int amd_viommu_cmdbuf_update(struct amd_viommu_cmdbuf_data *data)
+{
+	int i, numpg = data->cmdbuf_size >> PAGE_SHIFT;
+	struct amd_iommu *iommu = get_amd_iommu_from_devid(data->iommu_id);
+	struct amd_iommu_vminfo *vminfo;
+	unsigned int gid = data->gid;
+	struct page **pages;
+	unsigned long npages = 0;
+	unsigned long iova;
+	unsigned long hva = data->hva;
+
+	if (!iommu || !data->cmdbuf_size || data->cmdbuf_size > CMD_BUFFER_MAXSIZE)
+		return -EINVAL;
+
+	pages = kcalloc(numpg, sizeof(struct page *), GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+	vminfo = get_vminfo(iommu, gid);
+	if (!vminfo) {
+		kfree(pages);
+		return -EINVAL;
+	}
+
+	/*
+	 * Set up the vIOMMU guest command buffer in IOMMU Private Address (IPA)
+	 * space for the specified GID.
+	 */
+	for (i = 0; i < numpg; i++, hva += PAGE_SIZE) {
+		int ret;
+		u64 phys;
+
+		if (pin_user_pages_fast(hva, 1, FOLL_WRITE, &pages[i]) != 1) {
+			pr_err("%s: Failed to pin page at hva %#lx\n", __func__, hva);
+			goto err_out;
+		}
+
+		phys = __sme_set(page_to_pfn(pages[i]) << PAGE_SHIFT);
+		iova = VIOMMU_GUEST_CMDBUF_BASE + (i * PAGE_SIZE) + (gid * CMD_BUFFER_MAXSIZE);
+
+		pr_debug("%s: iova=%#lx, phys=%#llx\n", __func__, iova, phys);
+		ret = amd_iommu_v1_map_pages(&iommu->viommu_pdom->iop.iop.ops,
+					     iova, phys, PAGE_SIZE, 1,
+					     IOMMU_PROT_IR | IOMMU_PROT_IW,
+					     GFP_KERNEL, NULL);
+		if (ret) {
+			pr_err("%s: Failed to map page iova:%#lx, phys=%#llx\n",
+			       __func__, iova, phys);
+			unpin_user_pages(&pages[i], 1);
+			goto err_out;
+		}
+		npages++;
+	}
+	return 0;
+err_out:
+	viommu_cmdbuf_free(iommu->viommu_pdom, &iommu->viommu_pdom->iop.iop.ops,
+			   VIOMMU_GUEST_CMDBUF_BASE + (gid * CMD_BUFFER_MAXSIZE),
+			   pages, npages);
+	kfree(pages);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(amd_viommu_cmdbuf_update);
-- 
2.34.1
