[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID:
<SI2PR01MB4393BFD04F698501F19013B4DCCDA@SI2PR01MB4393.apcprd01.prod.exchangelabs.com>
Date: Thu, 13 Nov 2025 23:54:07 +0800
From: Wei Wang <wei.w.wang@...mail.com>
To: alex@...zbot.org,
jgg@...dia.com,
thomas.lendacky@....com,
vasant.hegde@....com,
suravee.suthikulpanit@....com,
joro@...tes.org
Cc: aik@....com,
kevin.tian@...el.com,
wei.w.wang@...mail.com,
linux-kernel@...r.kernel.org,
iommu@...ts.linux.dev
Subject: [PATCH v3 2/2] vfio/type1: Set IOMMU_MMIO in dma->prot for MMIO-backed addresses
Before requesting the IOMMU driver to map an IOVA to a physical address,
set the IOMMU_MMIO flag in dma->prot when the physical address corresponds
to MMIO. This allows the IOMMU driver to handle MMIO mappings specially.
For example, on AMD CPUs with SME enabled, the IOMMU driver avoids setting
the C-bit if iommu_map() is called with IOMMU_MMIO set in prot. This
prevents encryption-related failures in PCIe P2P communication when the
peer device's MMIO is accessed through IOVA translation.
Signed-off-by: Wei Wang <wei.w.wang@...mail.com>
---
drivers/vfio/vfio_iommu_type1.c | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 5167bec14e36..cde7cfcfd61e 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -583,7 +583,7 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
* returned initial pfn are provided; subsequent pfns are contiguous.
*/
static long vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
- unsigned long npages, int prot, unsigned long *pfn,
+ unsigned long npages, int *prot, unsigned long *pfn,
struct vfio_batch *batch)
{
unsigned long pin_pages = min_t(unsigned long, npages, batch->capacity);
@@ -591,7 +591,7 @@ static long vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
unsigned int flags = 0;
long ret;
- if (prot & IOMMU_WRITE)
+ if (*prot & IOMMU_WRITE)
flags |= FOLL_WRITE;
mmap_read_lock(mm);
@@ -601,6 +601,7 @@ static long vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
*pfn = page_to_pfn(batch->pages[0]);
batch->size = ret;
batch->offset = 0;
+ *prot &= ~IOMMU_MMIO;
goto done;
} else if (!ret) {
ret = -EFAULT;
@@ -615,7 +616,7 @@ static long vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
unsigned long addr_mask;
ret = follow_fault_pfn(vma, mm, vaddr, pfn, &addr_mask,
- prot & IOMMU_WRITE);
+ *prot & IOMMU_WRITE);
if (ret == -EAGAIN)
goto retry;
@@ -629,6 +630,8 @@ static long vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
ret = -EFAULT;
}
}
+ if (vma->vm_flags & VM_IO)
+ *prot |= IOMMU_MMIO;
}
done:
mmap_read_unlock(mm);
@@ -709,7 +712,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
cond_resched();
/* Empty batch, so refill it. */
- ret = vaddr_get_pfns(mm, vaddr, npage, dma->prot,
+ ret = vaddr_get_pfns(mm, vaddr, npage, &dma->prot,
&pfn, batch);
if (ret < 0)
goto unpin_out;
@@ -850,7 +853,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
vfio_batch_init_single(&batch);
- ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, &batch);
+ ret = vaddr_get_pfns(mm, vaddr, 1, &dma->prot, pfn_base, &batch);
if (ret != 1)
goto out;
--
2.51.1
Powered by blists - more mailing lists