Message-ID: <3eiwpcckfd4bhkxkxiz4iziry3v4xwx2sopndmbtynurs6rsek@cmmq4jjzlyhd>
Date: Sat, 10 Jan 2026 11:19:16 +0100
From: Jörg Rödel <joro@...tes.org>
To: Magnus Kalland <magnus@...phinics.com>
Cc: suravee.suthikulpanit@....com, robin.murphy@....com, will@...nel.org, 
	iommu@...ts.linux.dev, linux-kernel@...r.kernel.org, 
	"Tore H . Larsen" <torel@...ula.no>, "Lars B . Kristiansen" <larsk@...phinics.com>, 
	Jonas Markussen <jonas@...phinics.com>, Vasant Hegde <vasant.hegde@....com>
Subject: Re: [RFC PATCH v1] iommu/amd: Invalidate IRT cache for DMA aliases

Needs review from Vasant and/or Suravee.

On Mon, Dec 15, 2025 at 12:49:53PM +0100, Magnus Kalland wrote:
> DMA aliasing can cause interrupt remapping table entries (IRTEs) to be
> shared among multiple device IDs. The AMD IOMMU driver currently
> invalidates IRTE cache entries on a per-device basis whenever an IRTE
> is updated.
> 
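(For context, the pre-patch flush path, simplified from the
iommu_flush_irt_and_complete() hunk below: a single
INVALIDATE_INTERRUPT_TABLE command is queued for the one devid the
update came through, so other aliases pointing at the same table are
never flushed.

	build_inv_irt(&cmd, devid);
	__iommu_queue_command_sync(iommu, &cmd, true);
)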
> This approach can leave stale IRTE cache entries when an IRTE is cached
> under one DMA alias but later updated and invalidated only through a
> different alias. In such cases, the original device ID is never
> explicitly invalidated, since its mapping was established implicitly
> via aliasing.
> 
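(A concrete failure sequence, with device IDs invented purely for
illustration: suppose devid 0x4200 and its DMA alias 0x4210 resolve to
the same interrupt remapping table.

	1. The device raises an interrupt as 0x4210
	     -> the IOMMU caches the IRTE under devid 0x4210.
	2. The kernel rewrites that IRTE and flushes
	     -> INVALIDATE_INTERRUPT_TABLE is sent for 0x4200 only.
	3. The next interrupt arrives as 0x4210
	     -> it is served from the stale IRTE cached under 0x4210.
)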
> This incoherence has been observed when IRTEs are cached for one
> Non-Transparent Bridge (NTB) DMA alias and later updated through
> another.
> 
> This RFC proposes invalidating the interrupt remapping table cache for all DMA
> aliases when updating an IRTE.
> 
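(The patch below relies on pci_for_each_dma_alias(), declared in
include/linux/pci.h: it invokes the callback for the device itself,
for each devfn alias set in its dma_alias_mask, and for each bridge
while walking up the topology, stopping as soon as the callback
returns nonzero.

	int pci_for_each_dma_alias(struct pci_dev *pdev,
				   int (*fn)(struct pci_dev *pdev,
					     u16 alias, void *data),
				   void *data);
)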
> Cc: Tore H. Larsen <torel@...ula.no>
> Co-developed-by: Lars B. Kristiansen <larsk@...phinics.com>
> Signed-off-by: Lars B. Kristiansen <larsk@...phinics.com>
> Co-developed-by: Jonas Markussen <jonas@...phinics.com>
> Signed-off-by: Jonas Markussen <jonas@...phinics.com>
> Signed-off-by: Magnus Kalland <magnus@...phinics.com>
> ---
>  drivers/iommu/amd/iommu.c | 31 ++++++++++++++++++++++++++++---
>  1 file changed, 28 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
> index 9f1d56a5e145..5ac19398024f 100644
> --- a/drivers/iommu/amd/iommu.c
> +++ b/drivers/iommu/amd/iommu.c
> @@ -31,6 +31,7 @@
>  #include <linux/irqdomain.h>
>  #include <linux/percpu.h>
>  #include <linux/cc_platform.h>
> +#include <linux/iommu.h>
>  #include <asm/irq_remapping.h>
>  #include <asm/io_apic.h>
>  #include <asm/apic.h>
> @@ -3095,22 +3096,44 @@ const struct iommu_ops amd_iommu_ops = {
>  static struct irq_chip amd_ir_chip;
>  static DEFINE_SPINLOCK(iommu_table_lock);
>  
> +static int iommu_flush_dev_irt(struct pci_dev *pdev, u16 devid, void *data)
> +{
> +	int ret;
> +	struct iommu_cmd cmd;
> +	struct amd_iommu *iommu = data;
> +
> +	build_inv_irt(&cmd, devid);
> +	ret = __iommu_queue_command_sync(iommu, &cmd, true);
> +	return ret;
> +}
> +
>  static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
>  {
>  	int ret;
>  	u64 data;
> +	int domain = iommu->pci_seg->id;
> +	unsigned int bus = PCI_BUS_NUM(devid);
> +	unsigned int devfn = devid & 0xff;
>  	unsigned long flags;
>  	struct iommu_cmd cmd, cmd2;
> +	struct pci_dev *pdev = NULL;
>  
>  	if (iommu->irtcachedis_enabled)
>  		return;
>  
> -	build_inv_irt(&cmd, devid);
>  	data = atomic64_inc_return(&iommu->cmd_sem_val);
>  	build_completion_wait(&cmd2, iommu, data);
>  
> -	raw_spin_lock_irqsave(&iommu->lock, flags);
> -	ret = __iommu_queue_command_sync(iommu, &cmd, true);
> +	pdev = pci_get_domain_bus_and_slot(domain, bus, devfn);
> +	if (pdev) {
> +		raw_spin_lock_irqsave(&iommu->lock, flags);
> +		ret = pci_for_each_dma_alias(pdev, iommu_flush_dev_irt, iommu);
> +	} else {
> +		build_inv_irt(&cmd, devid);
> +		raw_spin_lock_irqsave(&iommu->lock, flags);
> +		ret = __iommu_queue_command_sync(iommu, &cmd, true);
> +	}
> +
>  	if (ret)
>  		goto out;
>  	ret = __iommu_queue_command_sync(iommu, &cmd2, false);
> @@ -3119,6 +3142,8 @@ static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
>  	wait_on_sem(iommu, data);
>  out:
>  	raw_spin_unlock_irqrestore(&iommu->lock, flags);
> +	if (pdev)
> +		pci_dev_put(pdev);
>  }
>  
>  static inline u8 iommu_get_int_tablen(struct iommu_dev_data *dev_data)
> -- 
> 2.43.0
> 
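A note on the devid decode used above: AMD IOMMU device IDs pack the
PCI bus and devfn as (bus << 8) | devfn, which is what PCI_BUS_NUM()
and the 0xff mask take apart. A standalone sketch, with the devid
value invented for illustration:

	#include <stdio.h>
	#include <stdint.h>

	#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff) /* as in include/linux/pci.h */

	int main(void)
	{
		uint16_t devid = 0x4210;     /* invented: bus 0x42, devfn 0x10 */
		unsigned int devfn = devid & 0xff;

		/* Prints "42:02.0" -- the bus:slot.func form of the devid. */
		printf("%02x:%02x.%x\n", PCI_BUS_NUM(devid),
		       devfn >> 3, devfn & 0x7);
		return 0;
	}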
