Message-Id: <20220620124723.196723432@linuxfoundation.org>
Date:   Mon, 20 Jun 2022 14:51:41 +0200
From:   Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To:     linux-kernel@...r.kernel.org
Cc:     Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        stable@...r.kernel.org, Robin Murphy <robin.murphy@....com>,
        Christoph Hellwig <hch@....de>,
        David Rientjes <rientjes@...gle.com>
Subject: [PATCH 5.10 78/84] dma-direct: don't over-decrypt memory

From: Robin Murphy <robin.murphy@....com>

commit 4a37f3dd9a83186cb88d44808ab35b78375082c9 upstream.

The original x86 sev_alloc() only called set_memory_decrypted() on
memory returned by alloc_pages_node(), so the page order calculation
fell out of that logic. However, the common dma-direct code has several
potential allocators, not all of which are guaranteed to round up the
underlying allocation to a power-of-two size, so carrying over that
calculation for the encryption/decryption size was a mistake. Fix it by
rounding to a *number* of pages, rather than an order.
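
[ Editorial illustration, not part of the patch: a minimal userspace
  sketch, assuming a 4 KiB PAGE_SIZE, of how sizing the call by
  1 << get_order(size) over-counts pages for a non-power-of-two
  allocation, while rounding the byte size up to a page count
  (what PFN_UP() does) gives the exact number. ]

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Same rounding PFN_UP() performs: bytes -> pages, rounded up */
static unsigned long pfn_up(unsigned long size)
{
	return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

/* Smallest order such that (1 << order) pages cover 'size' bytes */
static unsigned int order_for(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 200 * 1024;	/* e.g. a 200 KiB allocation */

	/* 50 pages are actually backing the allocation ... */
	printf("PFN_UP(size)         = %lu pages\n", pfn_up(size));
	/* ... but 1 << get_order(size) would decrypt 64 pages (256 KiB) */
	printf("1 << get_order(size) = %lu pages\n", 1UL << order_for(size));
	return 0;
}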

Until recently there was an even worse interaction with DMA_DIRECT_REMAP
where we could have ended up decrypting part of the next adjacent
vmalloc area, only averted by no architecture actually supporting both
configs at once. Don't ask how I found that one out...

Fixes: c10f07aa27da ("dma/direct: Handle force decryption for DMA coherent buffers in common code")
Signed-off-by: Robin Murphy <robin.murphy@....com>
Signed-off-by: Christoph Hellwig <hch@....de>
Acked-by: David Rientjes <rientjes@...gle.com>
[ backport the functional change without all the prior refactoring ]
Signed-off-by: Robin Murphy <robin.murphy@....com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
 kernel/dma/direct.c |   16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -188,7 +188,7 @@ void *dma_direct_alloc(struct device *de
 			goto out_free_pages;
 		if (force_dma_unencrypted(dev)) {
 			err = set_memory_decrypted((unsigned long)ret,
-						   1 << get_order(size));
+						   PFN_UP(size));
 			if (err)
 				goto out_free_pages;
 		}
@@ -210,7 +210,7 @@ void *dma_direct_alloc(struct device *de
 	ret = page_address(page);
 	if (force_dma_unencrypted(dev)) {
 		err = set_memory_decrypted((unsigned long)ret,
-					   1 << get_order(size));
+					   PFN_UP(size));
 		if (err)
 			goto out_free_pages;
 	}
@@ -231,7 +231,7 @@ done:
 out_encrypt_pages:
 	if (force_dma_unencrypted(dev)) {
 		err = set_memory_encrypted((unsigned long)page_address(page),
-					   1 << get_order(size));
+					   PFN_UP(size));
 		/* If memory cannot be re-encrypted, it must be leaked */
 		if (err)
 			return NULL;
@@ -244,8 +244,6 @@ out_free_pages:
 void dma_direct_free(struct device *dev, size_t size,
 		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
-	unsigned int page_order = get_order(size);
-
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
 	    !force_dma_unencrypted(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
@@ -266,7 +264,7 @@ void dma_direct_free(struct device *dev,
 		return;
 
 	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+		set_memory_encrypted((unsigned long)cpu_addr, PFN_UP(size));
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
 		vunmap(cpu_addr);
@@ -302,8 +300,7 @@ struct page *dma_direct_alloc_pages(stru
 
 	ret = page_address(page);
 	if (force_dma_unencrypted(dev)) {
-		if (set_memory_decrypted((unsigned long)ret,
-				1 << get_order(size)))
+		if (set_memory_decrypted((unsigned long)ret, PFN_UP(size)))
 			goto out_free_pages;
 	}
 	memset(ret, 0, size);
@@ -318,7 +315,6 @@ void dma_direct_free_pages(struct device
 		struct page *page, dma_addr_t dma_addr,
 		enum dma_data_direction dir)
 {
-	unsigned int page_order = get_order(size);
 	void *vaddr = page_address(page);
 
 	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
@@ -327,7 +323,7 @@ void dma_direct_free_pages(struct device
 		return;
 
 	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
+		set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
 
 	dma_free_contiguous(dev, page, size);
 }

