Message-ID: <20251124083237.26c92d2b.alex@shazbot.org>
Date: Mon, 24 Nov 2025 08:32:37 -0700
From: Alex Williamson <alex@...zbot.org>
To: <ankita@...dia.com>
Cc: <jgg@...pe.ca>, <yishaih@...dia.com>, <skolothumtho@...dia.com>,
<kevin.tian@...el.com>, <aniketa@...dia.com>, <vsethi@...dia.com>,
<mochs@...dia.com>, <Yunxiang.Li@....com>, <yi.l.liu@...el.com>,
<zhangdongdong@...incomputing.com>, <avihaih@...dia.com>,
<bhelgaas@...gle.com>, <peterx@...hat.com>, <pstanner@...hat.com>,
<apopple@...dia.com>, <kvm@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <cjia@...dia.com>, <kwankhede@...dia.com>,
<targupta@...dia.com>, <zhiw@...dia.com>, <danw@...dia.com>,
<dnigam@...dia.com>, <kjaju@...dia.com>
Subject: Re: [PATCH v5 3/7] vfio/nvgrace-gpu: Add support for huge pfnmap

On Mon, 24 Nov 2025 11:59:22 +0000
<ankita@...dia.com> wrote:
> From: Ankit Agrawal <ankita@...dia.com>
>
> NVIDIA's Grace-based systems have large device memory. The device
> memory is mapped as VM_PFNMAP in the VMM VMA. The nvgrace-gpu
> module can make use of the huge PFNMAP support added in mm [1].
>
> To achieve this, the nvgrace-gpu module is updated to implement the
> huge_fault ops. The implementation establishes the mapping according
> to the requested order. Note that if the PFN or the VMA address is
> unaligned to the order, the mapping falls back to the PTE level.
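
To make that fallback condition concrete (an illustration only, assuming
4K pages and a PMD-order fault, i.e. order = 9):

	/* PAGE_SIZE << 9 = 2MB, so the fault address is rounded
	 * down to a 2MB boundary:
	 */
	addr = 0x7f0000201000UL & ~(0x200000UL - 1);	/* 0x7f0000200000 */

	/* The handler returns VM_FAULT_FALLBACK, and the fault is
	 * retried at PTE granularity, if addr < vm_start, if
	 * addr + 2MB > vm_end, or if (pfn & 0x1ff) is non-zero,
	 * i.e. the pfn is not 512-page aligned.
	 */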
>
> Link: https://lore.kernel.org/all/20240826204353.2228736-1-peterx@redhat.com/ [1]
>
> cc: Alex Williamson <alex@...zbot.org>
> cc: Jason Gunthorpe <jgg@...pe.ca>
> cc: Vikram Sethi <vsethi@...dia.com>
> Signed-off-by: Ankit Agrawal <ankita@...dia.com>
> ---
> drivers/vfio/pci/nvgrace-gpu/main.c | 43 +++++++++++++++++++++++------
> 1 file changed, 35 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c
> index f74f3d8e1ebe..c84c01954c9e 100644
> --- a/drivers/vfio/pci/nvgrace-gpu/main.c
> +++ b/drivers/vfio/pci/nvgrace-gpu/main.c
> @@ -130,32 +130,58 @@ static void nvgrace_gpu_close_device(struct vfio_device *core_vdev)
> vfio_pci_core_close_device(core_vdev);
> }
>
> -static vm_fault_t nvgrace_gpu_vfio_pci_fault(struct vm_fault *vmf)
> +static vm_fault_t nvgrace_gpu_vfio_pci_huge_fault(struct vm_fault *vmf,
> + unsigned int order)
> {
> struct vm_area_struct *vma = vmf->vma;
> struct nvgrace_gpu_pci_core_device *nvdev = vma->vm_private_data;
> int index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
> vm_fault_t ret = VM_FAULT_SIGBUS;
> struct mem_region *memregion;
> - unsigned long pgoff, pfn;
> + unsigned long pgoff, pfn, addr;
>
> memregion = nvgrace_gpu_memregion(index, nvdev);
> if (!memregion)
> return ret;
>
> - pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
> + addr = vmf->address & ~((PAGE_SIZE << order) - 1);
> + pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
> pfn = PHYS_PFN(memregion->memphys) + pgoff;
>
> + if (order && (addr < vma->vm_start ||
> + addr + (PAGE_SIZE << order) > vma->vm_end ||
> + pfn & ((1 << order) - 1)))
> + return VM_FAULT_FALLBACK;
> +
> scoped_guard(rwsem_read, &nvdev->core_device.memory_lock)
> - ret = vmf_insert_pfn(vmf->vma, vmf->address, pfn);
> + ret = vfio_pci_vmf_insert_pfn(vmf, pfn, order);
>
> return ret;
> }
It may be worth considering replicating the dev_dbg from
vfio_pci_mmap_huge_fault(); it's been very useful in validating that
we're getting the huge PFNMAPs we expect.
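Roughly something like this just before the return (a sketch only; the
exact format string used in vfio_pci_core.c may differ, and the names
follow the patch above):

	dev_dbg_ratelimited(&nvdev->core_device.pdev->dev,
			    "%s(,order = %d) pgoff 0x%lx: 0x%x\n",
			    __func__, order, pgoff, (unsigned int)ret);

Thanks,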
Alex
>
> +static vm_fault_t nvgrace_gpu_vfio_pci_fault(struct vm_fault *vmf)
> +{
> + return nvgrace_gpu_vfio_pci_huge_fault(vmf, 0);
> +}
> +
> static const struct vm_operations_struct nvgrace_gpu_vfio_pci_mmap_ops = {
> .fault = nvgrace_gpu_vfio_pci_fault,
> +#ifdef CONFIG_ARCH_SUPPORTS_HUGE_PFNMAP
> + .huge_fault = nvgrace_gpu_vfio_pci_huge_fault,
> +#endif
> };
>
> +static size_t nvgrace_gpu_aligned_devmem_size(size_t memlength)
> +{
> +#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
> + return ALIGN(memlength, PMD_SIZE);
> +#endif
> +#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
> + return ALIGN(memlength, PUD_SIZE);
> +#endif
> + return memlength;
> +}
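
Separate nit on this hunk: on an architecture that defines both
CONFIG_ARCH_SUPPORTS_PMD_PFNMAP and CONFIG_ARCH_SUPPORTS_PUD_PFNMAP,
the first return makes the PUD branch dead code, so the exposed size is
only ever PMD-aligned. If PUD-sized mappings of the region tail are
intended, checking PUD first would seem preferable, e.g. (a sketch
only):

	#if defined(CONFIG_ARCH_SUPPORTS_PUD_PFNMAP)
		return ALIGN(memlength, PUD_SIZE);
	#elif defined(CONFIG_ARCH_SUPPORTS_PMD_PFNMAP)
		return ALIGN(memlength, PMD_SIZE);
	#else
		return memlength;
	#endif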
> +
> static int nvgrace_gpu_mmap(struct vfio_device *core_vdev,
> struct vm_area_struct *vma)
> {
> @@ -185,10 +211,10 @@ static int nvgrace_gpu_mmap(struct vfio_device *core_vdev,
> return -EOVERFLOW;
>
> /*
> - * Check that the mapping request does not go beyond available device
> - * memory size
> + * Check that the mapping request does not go beyond the exposed
> + * device memory size.
> */
> - if (end > memregion->memlength)
> + if (end > nvgrace_gpu_aligned_devmem_size(memregion->memlength))
> return -EINVAL;
>
> vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
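
For what it's worth, the aligned size becomes visible to userspace via
the sparse mmap capability in the hunk below, so a VMM can simply map
the reported (huge-page-aligned) size. A hypothetical userspace side,
with illustrative names not taken from the patch:

	/* area_offset/area_size as reported by the sparse mmap
	 * capability of VFIO_DEVICE_GET_REGION_INFO; area_size is
	 * now huge-page aligned.
	 */
	void *map = mmap(NULL, area_size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, device_fd, region_offset + area_offset);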
> @@ -258,7 +284,8 @@ nvgrace_gpu_ioctl_get_region_info(struct vfio_device *core_vdev,
>
> sparse->nr_areas = 1;
> sparse->areas[0].offset = 0;
> - sparse->areas[0].size = memregion->memlength;
> + sparse->areas[0].size =
> + nvgrace_gpu_aligned_devmem_size(memregion->memlength);
> sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
> sparse->header.version = 1;
>