Message-Id: <20201022043836.1120024-1-furquan@google.com>
Date:   Wed, 21 Oct 2020 21:38:36 -0700
From:   Furquan Shaikh <furquan@...gle.com>
To:     linux-kernel@...r.kernel.org
Cc:     Prashant Malani <pmalani@...omium.org>,
        Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        Arthur Heymans <arthur@...ymans.xyz>,
        Patrick Rudolph <patrick.rudolph@...ements.com>,
        Ard Biesheuvel <ardb@...nel.org>, dlaurie@...gle.com,
        Furquan Shaikh <furquan@...gle.com>
Subject: [PATCH v2] firmware: gsmi: Drop the use of dma_pool_* API functions

The GSMI driver uses the dma_pool_* API functions for buffer
allocation because it requires the SMI buffers to be allocated within
the 32-bit physical address space. However, this does not work well
with an IOMMU, since there is no real device and hence no IOMMU
domain associated with the device.
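
(For context, the dma_pool_* pattern used until now is sketched
below; this is an illustrative condensation, not the exact driver
code, and "pdev" stands in for the GSMI platform device.)

	struct dma_pool *pool;
	dma_addr_t handle;
	void *buf;

	/* dma_pool_* is bound to a struct device and goes through the
	 * DMA mapping layer, so 'handle' is a device (DMA) address,
	 * even though GSMI only needs the physical address of 'buf'. */
	pool = dma_pool_create("gsmi", &pdev->dev, GSMI_BUF_SIZE,
			       GSMI_BUF_ALIGN, 0);
	buf = dma_pool_alloc(pool, GFP_KERNEL, &handle);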

Since this is not a real device, it does not require any device
address (IOVA) for the buffer allocations. The only requirement is
that the physical address of the buffer lies within the 32-bit
physical address space, because the buffers have nothing to do with
DMA at all: they are used only for communication with firmware
executing in SMI mode, which can access only the bottom 4GiB of
memory. Hence, this change switches to a SLAB cache created with
SLAB_CACHE_DMA32, which guarantees that allocations come from the
DMA32 memory zone. All calls to dma_pool_* are replaced with
kmem_cache_*.

In addition, all the code for managing the dma_pool for the GSMI
platform device is dropped.
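
Condensed, the allocation scheme after this change looks roughly like
the sketch below (illustrative only; the real hunks follow):

	struct kmem_cache *cache;
	void *buf;

	/* SLAB_CACHE_DMA32 restricts the cache to ZONE_DMA32, i.e. to
	 * memory below the 4GiB physical boundary, with no struct
	 * device or dma_addr_t involved. */
	cache = kmem_cache_create("gsmi", GSMI_BUF_SIZE, GSMI_BUF_ALIGN,
				  SLAB_CACHE_DMA32, NULL);
	buf = kmem_cache_alloc(cache, GFP_KERNEL);

	/* Defensive check: the buffer must fit in 32 physical bits. */
	if (buf && (u64)virt_to_phys(buf) >> 32) {
		kmem_cache_free(cache, buf);
		buf = NULL;
	}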

Signed-off-by: Furquan Shaikh <furquan@...gle.com>
---
Changelog since v1:
- Switched to using a SLAB cache created with SLAB_CACHE_DMA32.
- Added a comment to the code and the commit message explaining the
  reason for using the DMA32 memory zone.

 drivers/firmware/google/gsmi.c | 38 +++++++++++++++++++++++-----------
 1 file changed, 26 insertions(+), 12 deletions(-)

diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 7d9367b22010..092d8cb209a3 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -17,7 +17,6 @@
 #include <linux/string.h>
 #include <linux/spinlock.h>
 #include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/ioctl.h>
@@ -85,7 +84,6 @@
 struct gsmi_buf {
 	u8 *start;			/* start of buffer */
 	size_t length;			/* length of buffer */
-	dma_addr_t handle;		/* dma allocation handle */
 	u32 address;			/* physical address of buffer */
 };
 
@@ -97,7 +95,7 @@ static struct gsmi_device {
 	spinlock_t lock;		/* serialize access to SMIs */
 	u16 smi_cmd;			/* SMI command port */
 	int handshake_type;		/* firmware handler interlock type */
-	struct dma_pool *dma_pool;	/* DMA buffer pool */
+	struct kmem_cache *mem_pool;	/* kmem cache for gsmi_buf allocations */
 } gsmi_dev;
 
 /* Packed structures for communicating with the firmware */
@@ -157,14 +155,20 @@ static struct gsmi_buf *gsmi_buf_alloc(void)
 	}
 
 	/* allocate buffer in 32bit address space */
-	smibuf->start = dma_pool_alloc(gsmi_dev.dma_pool, GFP_KERNEL,
-				       &smibuf->handle);
+	smibuf->start = kmem_cache_alloc(gsmi_dev.mem_pool, GFP_KERNEL);
 	if (!smibuf->start) {
 		printk(KERN_ERR "gsmi: failed to allocate name buffer\n");
 		kfree(smibuf);
 		return NULL;
 	}
 
+	if ((u64)virt_to_phys(smibuf->start) >> 32) {
+		printk(KERN_ERR "gsmi: allocation not within 32-bit physical address space\n");
+		kmem_cache_free(gsmi_dev.mem_pool, smibuf->start);
+		kfree(smibuf);
+		return NULL;
+	}
+
 	/* fill in the buffer handle */
 	smibuf->length = GSMI_BUF_SIZE;
 	smibuf->address = (u32)virt_to_phys(smibuf->start);
@@ -176,8 +180,7 @@ static void gsmi_buf_free(struct gsmi_buf *smibuf)
 {
 	if (smibuf) {
 		if (smibuf->start)
-			dma_pool_free(gsmi_dev.dma_pool, smibuf->start,
-				      smibuf->handle);
+			kmem_cache_free(gsmi_dev.mem_pool, smibuf->start);
 		kfree(smibuf);
 	}
 }
@@ -914,9 +917,20 @@ static __init int gsmi_init(void)
 	spin_lock_init(&gsmi_dev.lock);
 
 	ret = -ENOMEM;
-	gsmi_dev.dma_pool = dma_pool_create("gsmi", &gsmi_dev.pdev->dev,
-					     GSMI_BUF_SIZE, GSMI_BUF_ALIGN, 0);
-	if (!gsmi_dev.dma_pool)
+
+	/*
+	 * SLAB cache is created using SLAB_CACHE_DMA32 to ensure that the
+	 * allocations for gsmi_buf come from the DMA32 memory zone. These
+	 * buffers have nothing to do with DMA. They are required for
+	 * communication with firmware executing in SMI mode which can only
+	 * access the bottom 4GiB of physical memory. Since DMA32 memory zone
+	 * guarantees allocation under the 4GiB boundary, this driver creates
+	 * a SLAB cache with SLAB_CACHE_DMA32 flag.
+	 */
+	gsmi_dev.mem_pool = kmem_cache_create("gsmi", GSMI_BUF_SIZE,
+					      GSMI_BUF_ALIGN,
+					      SLAB_CACHE_DMA32, NULL);
+	if (!gsmi_dev.mem_pool)
 		goto out_err;
 
 	/*
@@ -1032,7 +1046,7 @@ static __init int gsmi_init(void)
 	gsmi_buf_free(gsmi_dev.param_buf);
 	gsmi_buf_free(gsmi_dev.data_buf);
 	gsmi_buf_free(gsmi_dev.name_buf);
-	dma_pool_destroy(gsmi_dev.dma_pool);
+	kmem_cache_destroy(gsmi_dev.mem_pool);
 	platform_device_unregister(gsmi_dev.pdev);
 	pr_info("gsmi: failed to load: %d\n", ret);
 #ifdef CONFIG_PM
@@ -1057,7 +1071,7 @@ static void __exit gsmi_exit(void)
 	gsmi_buf_free(gsmi_dev.param_buf);
 	gsmi_buf_free(gsmi_dev.data_buf);
 	gsmi_buf_free(gsmi_dev.name_buf);
-	dma_pool_destroy(gsmi_dev.dma_pool);
+	kmem_cache_destroy(gsmi_dev.mem_pool);
 	platform_device_unregister(gsmi_dev.pdev);
 #ifdef CONFIG_PM
 	platform_driver_unregister(&gsmi_driver_info);
-- 
2.29.0.rc1.297.gfa9743e501-goog
