[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <aMfQCoLuVeR0nf02@google.com>
Date: Mon, 15 Sep 2025 08:36:26 +0000
From: Mostafa Saleh <smostafa@...gle.com>
To: "Aneesh Kumar K.V (Arm)" <aneesh.kumar@...nel.org>
Cc: linux-coco@...ts.linux.dev, kvmarm@...ts.linux.dev,
linux-pci@...r.kernel.org, linux-kernel@...r.kernel.org,
aik@....com, lukas@...ner.de, Samuel Ortiz <sameo@...osinc.com>,
Xu Yilun <yilun.xu@...ux.intel.com>, Jason Gunthorpe <jgg@...pe.ca>,
Suzuki K Poulose <Suzuki.Poulose@....com>,
Steven Price <steven.price@....com>,
Catalin Marinas <catalin.marinas@....com>,
Marc Zyngier <maz@...nel.org>, Will Deacon <will@...nel.org>,
Oliver Upton <oliver.upton@...ux.dev>
Subject: Re: [RFC PATCH v1 04/38] tsm: Support DMA Allocation from private
memory
Hi Aneesh,
On Mon, Jul 28, 2025 at 07:21:41PM +0530, Aneesh Kumar K.V (Arm) wrote:
> Currently, we enforce the use of bounce buffers to ensure that memory
> accessed by non-secure devices is explicitly shared with the host [1].
> However, for secure devices, this approach must be avoided.
Sorry if this is a basic question; I have just started looking into this.
I see that “force_dma_unencrypted” and “is_swiotlb_force_bounce” are only
used from the DMA-direct path, but it seems that your case involves an IOMMU.
How does this flag influence bouncing in that case?
Thanks,
Mostafa
>
> To achieve this, we introduce a device flag that controls whether a
> bounce buffer allocation is required for the device. Additionally, this flag is
> used to manage the top IPA bit assignment for setting up
> protected/unprotected IPA aliases.
>
> [1] commit fbf979a01375 ("arm64: Enforce bounce buffers for realm DMA")
>
> based on changes from Alexey Kardashevskiy <aik@....com>
> Signed-off-by: Aneesh Kumar K.V (Arm) <aneesh.kumar@...nel.org>
> ---
> arch/arm64/include/asm/mem_encrypt.h | 6 +-----
> arch/arm64/mm/mem_encrypt.c | 10 ++++++++++
> drivers/pci/tsm.c | 6 ++++++
> include/linux/device.h | 1 +
> include/linux/swiotlb.h | 4 ++++
> 5 files changed, 22 insertions(+), 5 deletions(-)
>
> diff --git a/arch/arm64/include/asm/mem_encrypt.h b/arch/arm64/include/asm/mem_encrypt.h
> index 314b2b52025f..d77c10cd5b79 100644
> --- a/arch/arm64/include/asm/mem_encrypt.h
> +++ b/arch/arm64/include/asm/mem_encrypt.h
> @@ -15,14 +15,10 @@ int arm64_mem_crypt_ops_register(const struct arm64_mem_crypt_ops *ops);
>
> int set_memory_encrypted(unsigned long addr, int numpages);
> int set_memory_decrypted(unsigned long addr, int numpages);
> +bool force_dma_unencrypted(struct device *dev);
>
> int realm_register_memory_enc_ops(void);
>
> -static inline bool force_dma_unencrypted(struct device *dev)
> -{
> - return is_realm_world();
> -}
> -
> /*
> * For Arm CCA guests, canonical addresses are "encrypted", so no changes
> * required for dma_addr_encrypted().
> diff --git a/arch/arm64/mm/mem_encrypt.c b/arch/arm64/mm/mem_encrypt.c
> index ee3c0ab04384..279696a8af3f 100644
> --- a/arch/arm64/mm/mem_encrypt.c
> +++ b/arch/arm64/mm/mem_encrypt.c
> @@ -17,6 +17,7 @@
> #include <linux/compiler.h>
> #include <linux/err.h>
> #include <linux/mm.h>
> +#include <linux/device.h>
>
> #include <asm/mem_encrypt.h>
>
> @@ -48,3 +49,12 @@ int set_memory_decrypted(unsigned long addr, int numpages)
> return crypt_ops->decrypt(addr, numpages);
> }
> EXPORT_SYMBOL_GPL(set_memory_decrypted);
> +
> +bool force_dma_unencrypted(struct device *dev)
> +{
> + if (dev->tdi_enabled)
> + return false;
> +
> + return is_realm_world();
> +}
> +EXPORT_SYMBOL_GPL(force_dma_unencrypted);
> diff --git a/drivers/pci/tsm.c b/drivers/pci/tsm.c
> index e4a3b5b37939..60f50d57a725 100644
> --- a/drivers/pci/tsm.c
> +++ b/drivers/pci/tsm.c
> @@ -120,6 +120,7 @@ static int pci_tsm_disconnect(struct pci_dev *pdev)
>
> tsm_ops->disconnect(pdev);
> tsm->state = PCI_TSM_INIT;
> + pdev->dev.tdi_enabled = false;
>
> return 0;
> }
> @@ -199,6 +200,8 @@ static int pci_tsm_accept(struct pci_dev *pdev)
> if (rc)
> return rc;
> tsm->state = PCI_TSM_ACCEPT;
> + pdev->dev.tdi_enabled = true;
> +
> return 0;
> }
>
> @@ -557,6 +560,9 @@ static void __pci_tsm_init(struct pci_dev *pdev)
> default:
> break;
> }
> +
> + /* FIXME!! should this be default true and switch to false for TEE capable device */
> + pdev->dev.tdi_enabled = false;
> }
>
> void pci_tsm_init(struct pci_dev *pdev)
> diff --git a/include/linux/device.h b/include/linux/device.h
> index 4940db137fff..d62e0dd9d8ee 100644
> --- a/include/linux/device.h
> +++ b/include/linux/device.h
> @@ -688,6 +688,7 @@ struct device {
> #ifdef CONFIG_IOMMU_DMA
> bool dma_iommu:1;
> #endif
> + bool tdi_enabled:1;
> };
>
> /**
> diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
> index 3dae0f592063..61e7cff7768b 100644
> --- a/include/linux/swiotlb.h
> +++ b/include/linux/swiotlb.h
> @@ -173,6 +173,10 @@ static inline bool is_swiotlb_force_bounce(struct device *dev)
> {
> struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
>
> + if (dev->tdi_enabled) {
> + dev_warn_once(dev, "(TIO) Disable SWIOTLB");
> + return false;
> + }
> return mem && mem->force_bounce;
> }
>
> --
> 2.43.0
>
Powered by blists - more mailing lists