Message-ID: <20260120064255.179425-2-aneesh.kumar@kernel.org>
Date: Tue, 20 Jan 2026 12:12:55 +0530
From: "Aneesh Kumar K.V (Arm)" <aneesh.kumar@...nel.org>
To: linux-kernel@...r.kernel.org,
	iommu@...ts.linux.dev,
	linux-coco@...ts.linux.dev
Cc: Catalin Marinas <catalin.marinas@....com>,
	will@...nel.org,
	robin.murphy@....com,
	suzuki.poulose@....com,
	jgg@...pe.ca,
	steven.price@....com,
	Marek Szyprowski <m.szyprowski@...sung.com>,
	"Aneesh Kumar K.V (Arm)" <aneesh.kumar@...nel.org>
Subject: [PATCH 2/2] dma-direct: Make phys_to_dma() pick encrypted vs unencrypted per device

On systems that apply an address encryption tag/mask to DMA addresses, the
choice of encrypted vs unencrypted DMA address is device-dependent (e.g.
TDISP trusted devices vs non-trusted devices).

Teach phys_to_dma() to make this choice itself, based on
force_dma_unencrypted(dev), and convert the dma-direct callers of
phys_to_dma_direct() to use phys_to_dma() directly. With that in place,
phys_to_dma_direct() is redundant, so drop it.

Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@...nel.org>
---
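Note (below the fold, not intended for the commit message): a minimal
sketch of how a generic mapping path is expected to use the reworked
helper. The function name map_single_example() is made up purely for
illustration; only phys_to_dma(), phys_to_dma_unencrypted() and
force_dma_unencrypted() are part of the existing API touched here.

#include <linux/dma-direct.h>

/*
 * With this patch, a caller no longer needs phys_to_dma_direct():
 * phys_to_dma() checks force_dma_unencrypted(dev) itself and returns
 * either the unencrypted alias (e.g. for a non-trusted device in a
 * confidential guest) or the encrypted alias (e.g. for a TDISP trusted
 * device).
 */
static dma_addr_t map_single_example(struct device *dev, phys_addr_t paddr)
{
	return phys_to_dma(dev, paddr);
}

Callers that must always get the unencrypted alias regardless of the
device, such as the swiotlb bounce buffers, keep calling
phys_to_dma_unencrypted() explicitly, as before.
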
 include/linux/dma-direct.h | 17 ++++++++++-------
 kernel/dma/direct.c        | 20 ++++++--------------
 2 files changed, 16 insertions(+), 21 deletions(-)

diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index c249912456f9..e2e3a08373a1 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -90,17 +90,20 @@ static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
 {
 	return dma_addr_unencrypted(__phys_to_dma(dev, paddr));
 }
-/*
- * If memory encryption is supported, phys_to_dma will set the memory encryption
- * bit in the DMA address, and dma_to_phys will clear it.
- * phys_to_dma_unencrypted is for use on special unencrypted memory like swiotlb
- * buffers.
- */
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+
+static inline dma_addr_t phys_to_dma_encrypted(struct device *dev,
+					       phys_addr_t paddr)
 {
 	return dma_addr_encrypted(__phys_to_dma(dev, paddr));
 }
 
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	if (force_dma_unencrypted(dev))
+		return phys_to_dma_unencrypted(dev, paddr);
+	return phys_to_dma_encrypted(dev, paddr);
+}
+
 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
 {
 	phys_addr_t paddr;
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index a5639e9415f5..59d7d9e15e17 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -23,14 +23,6 @@
  */
 u64 zone_dma_limit __ro_after_init = DMA_BIT_MASK(24);
 
-static inline dma_addr_t phys_to_dma_direct(struct device *dev,
-		phys_addr_t phys)
-{
-	if (force_dma_unencrypted(dev))
-		return phys_to_dma_unencrypted(dev, phys);
-	return phys_to_dma(dev, phys);
-}
-
 static inline struct page *dma_direct_to_page(struct device *dev,
 		dma_addr_t dma_addr)
 {
@@ -40,7 +32,7 @@ static inline struct page *dma_direct_to_page(struct device *dev,
 u64 dma_direct_get_required_mask(struct device *dev)
 {
 	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
-	u64 max_dma = phys_to_dma_direct(dev, phys);
+	u64 max_dma = phys_to_dma(dev, phys);
 
 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
@@ -69,7 +61,7 @@ static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
 
 bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
-	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);
+	dma_addr_t dma_addr = phys_to_dma(dev, phys);
 
 	if (dma_addr == DMA_MAPPING_ERROR)
 		return false;
@@ -178,7 +170,7 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
 	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
 	if (!page)
 		return NULL;
-	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+	*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	return ret;
 }
 
@@ -196,7 +188,7 @@ static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
 		arch_dma_prep_coherent(page, size);
 
 	/* return the page pointer as the opaque cookie */
-	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+	*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	return page;
 }
 
@@ -311,7 +303,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 			goto out_encrypt_pages;
 	}
 
-	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+	*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	return ret;
 
 out_encrypt_pages:
@@ -392,7 +384,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	if (dma_set_decrypted(dev, ret, size))
 		goto out_leak_pages;
 	memset(ret, 0, size);
-	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+	*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	return page;
 out_leak_pages:
 	return NULL;
-- 
2.43.0