Message-ID:
 <SI2PR01MB43930D9F3195452DD6BC4C69DC98A@SI2PR01MB4393.apcprd01.prod.exchangelabs.com>
Date: Wed, 4 Feb 2026 23:16:23 +0800
From: Wei Wang <wei.w.wang@...mail.com>
To: jgg@...dia.com, kevin.tian@...el.com, alex@...zbot.org, joro@...tes.org,
 thomas.lendacky@....com, vasant.hegde@....com,
 suravee.suthikulpanit@....com, aik@....com
Cc: iommu@...ts.linux.dev, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v5 2/2] vfio/type1: Set IOMMU_MMIO in dma->prot for
 MMIO-backed addresses

On 1/26/26 3:19 AM, Wei Wang wrote:
> Before requesting the IOMMU driver to map an IOVA to a physical address,
> set the IOMMU_MMIO flag in dma->prot when the physical address corresponds
> to MMIO. This allows the IOMMU driver to handle MMIO mappings specially.
> For example, on AMD CPUs with SME enabled, the IOMMU driver avoids setting
> the C-bit if iommu_map() is called with IOMMU_MMIO set in prot. This
> prevents issues with PCIe P2P communication when IOVA is used.
> 
> Signed-off-by: Wei Wang <wei.w.wang@...mail.com>
> Reviewed-by: Kevin Tian <kevin.tian@...el.com>
> ---
>   drivers/vfio/vfio_iommu_type1.c | 15 ++++++++++-----
>   1 file changed, 10 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> index 5167bec14e36..9fbd2ba519ca 100644
> --- a/drivers/vfio/vfio_iommu_type1.c
> +++ b/drivers/vfio/vfio_iommu_type1.c
> @@ -583,7 +583,7 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
>    * returned initial pfn are provided; subsequent pfns are contiguous.
>    */
>   static long vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
> -			   unsigned long npages, int prot, unsigned long *pfn,
> +			   unsigned long npages, int *prot, unsigned long *pfn,
>   			   struct vfio_batch *batch)
>   {
>   	unsigned long pin_pages = min_t(unsigned long, npages, batch->capacity);
> @@ -591,7 +591,7 @@ static long vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
>   	unsigned int flags = 0;
>   	long ret;
>   
> -	if (prot & IOMMU_WRITE)
> +	if (*prot & IOMMU_WRITE)
>   		flags |= FOLL_WRITE;
>   
>   	mmap_read_lock(mm);
> @@ -615,7 +615,7 @@ static long vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
>   		unsigned long addr_mask;
>   
>   		ret = follow_fault_pfn(vma, mm, vaddr, pfn, &addr_mask,
> -				       prot & IOMMU_WRITE);
> +				       *prot & IOMMU_WRITE);
>   		if (ret == -EAGAIN)
>   			goto retry;
>   
> @@ -623,6 +623,9 @@ static long vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
>   			if (is_invalid_reserved_pfn(*pfn)) {
>   				unsigned long epfn;
>   
> +				if (vma->vm_flags & VM_IO)
> +					*prot |= IOMMU_MMIO;
> +
>   				epfn = (*pfn | (~addr_mask >> PAGE_SHIFT)) + 1;
>   				ret = min_t(long, npages, epfn - *pfn);
>   			} else {
> @@ -709,7 +712,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
>   			cond_resched();
>   
>   			/* Empty batch, so refill it. */
> -			ret = vaddr_get_pfns(mm, vaddr, npage, dma->prot,
> +			ret = vaddr_get_pfns(mm, vaddr, npage, &dma->prot,
>   					     &pfn, batch);
>   			if (ret < 0)
>   				goto unpin_out;
> @@ -792,6 +795,8 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
>   
>   out:
>   	dma->has_rsvd |= rsvd;
> +	if (!rsvd)
> +		dma->prot &= ~IOMMU_MMIO;
>   	ret = vfio_lock_acct(dma, lock_acct, false);
>   
>   unpin_out:
> @@ -850,7 +855,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
>   
>   	vfio_batch_init_single(&batch);
>   
> -	ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, &batch);
> +	ret = vaddr_get_pfns(mm, vaddr, 1, &dma->prot, pfn_base, &batch);
>   	if (ret != 1)
>   		goto out;
>   

Hi Alexey,

Just checking in — do you have any further comments on this patch?

For reference, the previous discussion is here:
https://lore.kernel.org/lkml/SI2PR01MB439311868FBFB7DDAD19692CDCB4A@SI2PR01MB4393.apcprd01.prod.exchangelabs.com/
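
For anyone reviewing this without patch 1/2 at hand, the consumer side
looks roughly like the sketch below. This is only an illustration under
my assumptions; example_build_pte() and the EXAMPLE_PTE_* bits are made
up and are not the actual AMD driver code, though __sme_set(), IOMMU_MMIO
and IOMMU_WRITE are the real kernel symbols involved:

#include <linux/bits.h>
#include <linux/iommu.h>
#include <linux/mem_encrypt.h>
#include <linux/types.h>

/* Hypothetical present/permission bits, for illustration only. */
#define EXAMPLE_PTE_PRESENT	BIT_ULL(0)
#define EXAMPLE_PTE_WRITE	BIT_ULL(1)

static u64 example_build_pte(phys_addr_t paddr, int prot)
{
	/*
	 * MMIO targets must stay unencrypted, so only RAM gets the
	 * SME C-bit ORed in via __sme_set().
	 */
	u64 pte = (prot & IOMMU_MMIO) ? (u64)paddr : __sme_set(paddr);

	pte |= EXAMPLE_PTE_PRESENT;
	if (prot & IOMMU_WRITE)
		pte |= EXAMPLE_PTE_WRITE;

	return pte;
}

The vfio/type1 side in this patch only decides when to set IOMMU_MMIO:
it is ORed into dma->prot when the pinned range comes from a VM_IO vma
with an invalid/reserved PFN, and cleared again when the range turns out
to be ordinary RAM, so the flag reaches iommu_map() only for MMIO-backed
IOVAs.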
