lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <10029978-31bb-4ff2-891f-564b970a41b5@amd.com>
Date: Wed, 17 Dec 2025 15:24:54 +1100
From: Alexey Kardashevskiy <aik@....com>
To: Wei Wang <wei.w.wang@...mail.com>, jgg@...dia.com, kevin.tian@...el.com,
 alex@...zbot.org, joro@...tes.org, thomas.lendacky@....com,
 vasant.hegde@....com, suravee.suthikulpanit@....com
Cc: iommu@...ts.linux.dev, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v4 2/2] vfio/type1: Set IOMMU_MMIO in dma->prot for
 MMIO-backed addresses



On 17/12/25 03:13, Wei Wang wrote:
> Before requesting the IOMMU driver to map an IOVA to a physical address,
> set the IOMMU_MMIO flag in dma->prot when the physical address corresponds
> to MMIO. This allows the IOMMU driver to handle MMIO mappings specially.
> For example, on AMD CPUs with SME enabled, the IOMMU driver avoids setting
> the C-bit if iommu_map() is called with IOMMU_MMIO set in prot. This
> prevents issues with PCIe P2P communication when IOVA is used.
> 
> Signed-off-by: Wei Wang <wei.w.wang@...mail.com>
> Reviewed-by: Kevin Tian <kevin.tian@...el.com>
> ---
>   drivers/vfio/vfio_iommu_type1.c | 14 +++++++++-----
>   1 file changed, 9 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> index 5167bec14e36..dfe53da53b80 100644
> --- a/drivers/vfio/vfio_iommu_type1.c
> +++ b/drivers/vfio/vfio_iommu_type1.c
> @@ -583,7 +583,7 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
>    * returned initial pfn are provided; subsequent pfns are contiguous.
>    */
>   static long vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
> -			   unsigned long npages, int prot, unsigned long *pfn,
> +			   unsigned long npages, int *prot, unsigned long *pfn,
>   			   struct vfio_batch *batch)
>   {
>   	unsigned long pin_pages = min_t(unsigned long, npages, batch->capacity);
> @@ -591,7 +591,7 @@ static long vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
>   	unsigned int flags = 0;
>   	long ret;
>   
> -	if (prot & IOMMU_WRITE)
> +	if (*prot & IOMMU_WRITE)
>   		flags |= FOLL_WRITE;
>   
>   	mmap_read_lock(mm);
> @@ -601,6 +601,7 @@ static long vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
>   		*pfn = page_to_pfn(batch->pages[0]);
>   		batch->size = ret;
>   		batch->offset = 0;
> +		*prot &= ~IOMMU_MMIO;


Do you expect IOMMU_MMIO to already be set here? If so, why?

Then, what if vaddr_get_pfns() is called with a vaddr that covers some RAM immediately followed by MMIO? The whole vfio_dma descriptor will get IOMMU_MMIO, which is hardly desirable (although also quite unlikely).

Thanks,


>   		goto done;
>   	} else if (!ret) {
>   		ret = -EFAULT;
> @@ -615,7 +616,7 @@ static long vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
>   		unsigned long addr_mask;
>   
>   		ret = follow_fault_pfn(vma, mm, vaddr, pfn, &addr_mask,
> -				       prot & IOMMU_WRITE);
> +				       *prot & IOMMU_WRITE);
>   		if (ret == -EAGAIN)
>   			goto retry;
>   
> @@ -623,6 +624,9 @@ static long vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
>   			if (is_invalid_reserved_pfn(*pfn)) {
>   				unsigned long epfn;
>   
> +				if (vma->vm_flags & VM_IO)
> +					*prot |= IOMMU_MMIO;
> +
>   				epfn = (*pfn | (~addr_mask >> PAGE_SHIFT)) + 1;
>   				ret = min_t(long, npages, epfn - *pfn);
>   			} else {
> @@ -709,7 +713,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
>   			cond_resched();
>   
>   			/* Empty batch, so refill it. */
> -			ret = vaddr_get_pfns(mm, vaddr, npage, dma->prot,
> +			ret = vaddr_get_pfns(mm, vaddr, npage, &dma->prot,
>   					     &pfn, batch);
>   			if (ret < 0)
>   				goto unpin_out;
> @@ -850,7 +854,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
>   
>   	vfio_batch_init_single(&batch);
>   
> -	ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, &batch);
> +	ret = vaddr_get_pfns(mm, vaddr, 1, &dma->prot, pfn_base, &batch);
>   	if (ret != 1)
>   		goto out;
>   

-- 
Alexey


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ