[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251108143710.318702ec.alex@shazbot.org>
Date: Sat, 8 Nov 2025 14:37:10 -0700
From: Alex Williamson <alex@...zbot.org>
To: Alex Mastro <amastro@...com>
Cc: David Matlack <dmatlack@...gle.com>, Alex Williamson
<alex.williamson@...hat.com>, Jason Gunthorpe <jgg@...pe.ca>,
<kvm@...r.kernel.org>, <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH] vfio: selftests: Skip vfio_dma_map_limit_test if
mapping returns -EINVAL
On Sat, 8 Nov 2025 12:19:48 -0800
Alex Mastro <amastro@...com> wrote:
> On Fri, Nov 07, 2025 at 04:17:24PM -0800, Alex Mastro wrote:
> > On Fri, Nov 07, 2025 at 10:20:58PM +0000, David Matlack wrote:
> > > Skip vfio_dma_map_limit_test.{unmap_range,unmap_all} (instead of
> > > failing) on systems that do not support mapping in the page-sized region
> > > at the top of the u64 address space. Use -EINVAL as the signal for
> > > detecting systems with this limitation, as that is what both VFIO Type1
> > > and iommufd return.
> > >
> > > A more robust solution that could be considered in the future would be
> > > to explicitly check the range of supported IOVA regions and key off
> > > that, instead of inferring from -EINVAL.
> > >
> > > Fixes: de8d1f2fd5a5 ("vfio: selftests: add end of address space DMA map/unmap tests")
> > > Signed-off-by: David Matlack <dmatlack@...gle.com>
> >
> > Makes sense -- thanks David. Agree about keying this off
> > VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE longer term.
> >
> > Reviewed-by: Alex Mastro <amastro@...com>
>
> Here's my attempt at adding some machinery to query iova ranges, with
> normalization to iommufd's struct. I kept the vfio capability chain stuff
> relatively generic so we can use it for other things in the future if needed.
Seems we were both hacking on this; I hadn't seen that you had posted this
before sending:
https://lore.kernel.org/kvm/20251108212954.26477-1-alex@shazbot.org/T/#u
Maybe we can combine the best merits of each. Thanks,
Alex
> I can sequence this after your fix?
>
> diff --git a/tools/testing/selftests/vfio/lib/include/vfio_util.h b/tools/testing/selftests/vfio/lib/include/vfio_util.h
> index 240409bf5f8a..fb5efec52316 100644
> --- a/tools/testing/selftests/vfio/lib/include/vfio_util.h
> +++ b/tools/testing/selftests/vfio/lib/include/vfio_util.h
> @@ -4,9 +4,12 @@
>
> #include <fcntl.h>
> #include <string.h>
> -#include <linux/vfio.h>
> +
> +#include <uapi/linux/types.h>
> +#include <linux/iommufd.h>
> #include <linux/list.h>
> #include <linux/pci_regs.h>
> +#include <linux/vfio.h>
>
> #include "../../../kselftest.h"
>
> @@ -206,6 +209,9 @@ struct vfio_pci_device *vfio_pci_device_init(const char *bdf, const char *iommu_
> void vfio_pci_device_cleanup(struct vfio_pci_device *device);
> void vfio_pci_device_reset(struct vfio_pci_device *device);
>
> +struct iommu_iova_range *vfio_pci_iova_ranges(struct vfio_pci_device *device,
> + size_t *nranges);
> +
> int __vfio_pci_dma_map(struct vfio_pci_device *device,
> struct vfio_dma_region *region);
> int __vfio_pci_dma_unmap(struct vfio_pci_device *device,
> diff --git a/tools/testing/selftests/vfio/lib/vfio_pci_device.c b/tools/testing/selftests/vfio/lib/vfio_pci_device.c
> index a381fd253aa7..3297a41fdc31 100644
> --- a/tools/testing/selftests/vfio/lib/vfio_pci_device.c
> +++ b/tools/testing/selftests/vfio/lib/vfio_pci_device.c
> @@ -29,6 +29,145 @@
> VFIO_ASSERT_EQ(__ret, 0, "ioctl(%s, %s, %s) returned %d\n", #_fd, #_op, #_arg, __ret); \
> } while (0)
>
> +static struct vfio_info_cap_header *next_cap_hdr(void *buf, size_t bufsz,
> + size_t *cap_offset)
> +{
> + struct vfio_info_cap_header *hdr;
> +
> + if (!*cap_offset)
> + return NULL;
> +
> + /* Cap offset must be in bounds */
> + VFIO_ASSERT_LT(*cap_offset, bufsz);
> + /* There must be enough remaining space to contain the header */
> + VFIO_ASSERT_GE(bufsz - *cap_offset, sizeof(*hdr));
> + hdr = (struct vfio_info_cap_header *)((u8 *)buf + *cap_offset);
> + /* If there is a next, offset must monotonically increase */
> + if (hdr->next)
> + VFIO_ASSERT_GT(hdr->next, *cap_offset);
> + *cap_offset = hdr->next;
> +
> + return hdr;
> +}
> +
> +static struct vfio_info_cap_header *vfio_iommu_info_cap_hdr(struct vfio_iommu_type1_info *buf,
> + u16 cap_id)
> +{
> + struct vfio_info_cap_header *hdr;
> + size_t cap_offset = buf->cap_offset;
> +
> + if (!(buf->flags & VFIO_IOMMU_INFO_CAPS))
> + return NULL;
> +
> + if (cap_offset)
> + VFIO_ASSERT_GE(cap_offset, sizeof(struct vfio_iommu_type1_info));
> +
> + while ((hdr = next_cap_hdr(buf, buf->argsz, &cap_offset))) {
> + if (hdr->id == cap_id)
> + return hdr;
> + }
> +
> + return NULL;
> +}
> +
> +/* Return buffer including capability chain, if present. Free with free() */
> +static struct vfio_iommu_type1_info *vfio_iommu_info_buf(struct vfio_pci_device *device)
> +{
> + struct vfio_iommu_type1_info *buf;
> +
> + buf = malloc(sizeof(*buf));
> + VFIO_ASSERT_NOT_NULL(buf);
> +
> + *buf = (struct vfio_iommu_type1_info) {
> + .argsz = sizeof(*buf),
> + };
> +
> + ioctl_assert(device->container_fd, VFIO_IOMMU_GET_INFO, buf);
> +
> + buf = realloc(buf, buf->argsz);
> + VFIO_ASSERT_NOT_NULL(buf);
> +
> + ioctl_assert(device->container_fd, VFIO_IOMMU_GET_INFO, buf);
> +
> + return buf;
> +}
> +
> +/*
> + * Normalize vfio_iommu_type1 to report iommufd's iommu_iova_range. Free with
> + * free().
> + */
> +static struct iommu_iova_range *vfio_iommu_iova_ranges(struct vfio_pci_device *device,
> + size_t *nranges)
> +{
> + struct vfio_iommu_type1_info_cap_iova_range *cap_range;
> + struct vfio_iommu_type1_info *buf;
> + struct vfio_info_cap_header *hdr;
> + struct iommu_iova_range *ranges = NULL;
> +
> + buf = vfio_iommu_info_buf(device);
> + VFIO_ASSERT_NOT_NULL(buf);
> +
> + hdr = vfio_iommu_info_cap_hdr(buf, VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE);
> + if (!hdr)
> + goto free_buf;
> +
> + cap_range = container_of(hdr, struct vfio_iommu_type1_info_cap_iova_range, header);
> + if (!cap_range->nr_iovas)
> + goto free_buf;
> +
> + ranges = malloc(cap_range->nr_iovas * sizeof(*ranges));
> + VFIO_ASSERT_NOT_NULL(ranges);
> +
> + for (u32 i = 0; i < cap_range->nr_iovas; i++) {
> + ranges[i] = (struct iommu_iova_range){
> + .start = cap_range->iova_ranges[i].start,
> + .last = cap_range->iova_ranges[i].end,
> + };
> + }
> +
> + *nranges = cap_range->nr_iovas;
> +
> +free_buf:
> + free(buf);
> + return ranges;
> +}
> +
> +struct iommu_iova_range *iommufd_iova_ranges(struct vfio_pci_device *device,
> + size_t *nranges)
> +{
> + struct iommu_iova_range *ranges;
> + int ret;
> +
> + struct iommu_ioas_iova_ranges query = {
> + .size = sizeof(query),
> + .ioas_id = device->ioas_id,
> + };
> +
> + ret = ioctl(device->iommufd, IOMMU_IOAS_IOVA_RANGES, &query);
> + VFIO_ASSERT_EQ(ret, -1);
> + VFIO_ASSERT_EQ(errno, EMSGSIZE);
> + VFIO_ASSERT_GT(query.num_iovas, 0);
> +
> + ranges = malloc(query.num_iovas * sizeof(*ranges));
> + VFIO_ASSERT_NOT_NULL(ranges);
> +
> + query.allowed_iovas = (uintptr_t)ranges;
> +
> + ioctl_assert(device->iommufd, IOMMU_IOAS_IOVA_RANGES, &query);
> + *nranges = query.num_iovas;
> +
> + return ranges;
> +}
> +
> +struct iommu_iova_range *vfio_pci_iova_ranges(struct vfio_pci_device *device,
> + size_t *nranges)
> +{
> + if (device->iommufd)
> + return iommufd_iova_ranges(device, nranges);
> +
> + return vfio_iommu_iova_ranges(device, nranges);
> +}
> +
> iova_t __to_iova(struct vfio_pci_device *device, void *vaddr)
> {
> struct vfio_dma_region *region;
> diff --git a/tools/testing/selftests/vfio/vfio_dma_mapping_test.c b/tools/testing/selftests/vfio/vfio_dma_mapping_test.c
> index 4f1ea79a200c..78983c4c293b 100644
> --- a/tools/testing/selftests/vfio/vfio_dma_mapping_test.c
> +++ b/tools/testing/selftests/vfio/vfio_dma_mapping_test.c
> @@ -3,6 +3,8 @@
> #include <sys/mman.h>
> #include <unistd.h>
>
> +#include <uapi/linux/types.h>
> +#include <linux/iommufd.h>
> #include <linux/limits.h>
> #include <linux/mman.h>
> #include <linux/sizes.h>
> @@ -243,12 +245,31 @@ FIXTURE_TEARDOWN(vfio_dma_map_limit_test)
> ASSERT_EQ(munmap(self->region.vaddr, self->mmap_size), 0);
> }
>
> +static iova_t last_legal_iova(struct vfio_pci_device *device)
> +{
> + struct iommu_iova_range *ranges;
> + size_t nranges;
> + iova_t ret;
> +
> + ranges = vfio_pci_iova_ranges(device, &nranges);
> + VFIO_ASSERT_NOT_NULL(ranges);
> +
> + ret = ranges[nranges - 1].last;
> + free(ranges);
> +
> + return ret;
> +}
> +
> TEST_F(vfio_dma_map_limit_test, unmap_range)
> {
> + iova_t last_iova = last_legal_iova(self->device);
> struct vfio_dma_region *region = &self->region;
> u64 unmapped;
> int rc;
>
> + if (last_iova != ~(iova_t)0)
> + SKIP(return, "last legal iova=0x%lx\n", last_iova);
> +
> vfio_pci_dma_map(self->device, region);
> ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));
>
> @@ -259,10 +280,14 @@ TEST_F(vfio_dma_map_limit_test, unmap_range)
>
> TEST_F(vfio_dma_map_limit_test, unmap_all)
> {
> + iova_t last_iova = last_legal_iova(self->device);
> struct vfio_dma_region *region = &self->region;
> u64 unmapped;
> int rc;
>
> + if (last_iova != ~(iova_t)0)
> + SKIP(return, "last legal iova=0x%lx\n", last_iova);
> +
> vfio_pci_dma_map(self->device, region);
> ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));
>
>
Powered by blists - more mailing lists