Message-ID: <aG-lr9nUhwff4GuJ@google.com>
Date: Thu, 10 Jul 2025 11:36:15 +0000
From: Pranjal Shrivastava <praan@...gle.com>
To: Nicolin Chen <nicolinc@...dia.com>
Cc: jgg@...dia.com, kevin.tian@...el.com, corbet@....net,
	bagasdotme@...il.com, will@...nel.org, robin.murphy@....com,
	joro@...tes.org, thierry.reding@...il.com, vdumpa@...dia.com,
	jonathanh@...dia.com, shuah@...nel.org, jsnitsel@...hat.com,
	nathan@...nel.org, peterz@...radead.org, yi.l.liu@...el.com,
	mshavit@...gle.com, zhangzekun11@...wei.com, iommu@...ts.linux.dev,
	linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org,
	linux-arm-kernel@...ts.infradead.org, linux-tegra@...r.kernel.org,
	linux-kselftest@...r.kernel.org, patches@...ts.linux.dev,
	mochs@...dia.com, alok.a.tiwari@...cle.com, vasant.hegde@....com,
	dwmw2@...radead.org, baolu.lu@...ux.intel.com
Subject: Re: [PATCH v9 16/29] iommufd/selftest: Add coverage for
 IOMMUFD_CMD_HW_QUEUE_ALLOC

On Wed, Jul 09, 2025 at 10:59:08PM -0700, Nicolin Chen wrote:
> Add some simple tests for the IOMMUFD_CMD_HW_QUEUE_ALLOC infrastructure,
> covering the new iommufd_hw_queue_depend/undepend() helpers.
> 
> Signed-off-by: Nicolin Chen <nicolinc@...dia.com>

Reviewed-by: Pranjal Shrivastava <praan@...gle.com>
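
For readers following the series, the depend/undepend pattern this patch
covers boils down to the sketch below. It mirrors what the mock driver
does; struct my_hw_queue and my_lookup_prev_queue() are made-up names and
only the iommufd_hw_queue_depend/undepend() helpers plus the embedded
"core" member come from the series, so treat it as an illustration rather
than a drop-in driver:

struct my_hw_queue {
	struct iommufd_hw_queue core;
	struct my_hw_queue *prev;
};

static int my_hw_queue_init_phys(struct iommufd_hw_queue *hw_queue,
				 u32 index, phys_addr_t base_addr_pa)
{
	struct my_hw_queue *q = container_of(hw_queue, struct my_hw_queue, core);
	/* Assumed driver-specific lookup of the queue at index - 1 */
	struct my_hw_queue *prev = my_lookup_prev_queue(index);
	int rc;

	if (prev) {
		/* Take a reference on the queue object we depend on */
		rc = iommufd_hw_queue_depend(q, prev, core);
		if (rc)
			return rc;
	}
	q->prev = prev;
	return 0;
}

static void my_hw_queue_destroy(struct iommufd_hw_queue *hw_queue)
{
	struct my_hw_queue *q = container_of(hw_queue, struct my_hw_queue, core);

	if (q->prev)
		/* Drop the reference taken at init time */
		iommufd_hw_queue_undepend(q, q->prev, core);
}

This is why the selftest below has to allocate index 0 before index 1 and
destroy them in descending order: destroying index 0 returns EBUSY while
index 1 still holds the dependency.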

> ---
>  drivers/iommu/iommufd/iommufd_test.h          |  3 +
>  tools/testing/selftests/iommu/iommufd_utils.h | 31 ++++++
>  drivers/iommu/iommufd/selftest.c              | 97 +++++++++++++++++++
>  tools/testing/selftests/iommu/iommufd.c       | 59 +++++++++++
>  .../selftests/iommu/iommufd_fail_nth.c        |  6 ++
>  5 files changed, 196 insertions(+)
> 
> diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h
> index fbf9ecb35a13..51cd744a354f 100644
> --- a/drivers/iommu/iommufd/iommufd_test.h
> +++ b/drivers/iommu/iommufd/iommufd_test.h
> @@ -265,4 +265,7 @@ struct iommu_viommu_event_selftest {
>  	__u32 virt_id;
>  };
>  
> +#define IOMMU_HW_QUEUE_TYPE_SELFTEST 0xdeadbeef
> +#define IOMMU_TEST_HW_QUEUE_MAX 2
> +
>  #endif
> diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h
> index a5d4cbd089ba..9a556f99d992 100644
> --- a/tools/testing/selftests/iommu/iommufd_utils.h
> +++ b/tools/testing/selftests/iommu/iommufd_utils.h
> @@ -956,6 +956,37 @@ static int _test_cmd_vdevice_alloc(int fd, __u32 viommu_id, __u32 idev_id,
>  		     _test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id,   \
>  					     virt_id, vdev_id))
>  
> +static int _test_cmd_hw_queue_alloc(int fd, __u32 viommu_id, __u32 type,
> +				    __u32 idx, __u64 base_addr, __u64 length,
> +				    __u32 *hw_queue_id)
> +{
> +	struct iommu_hw_queue_alloc cmd = {
> +		.size = sizeof(cmd),
> +		.viommu_id = viommu_id,
> +		.type = type,
> +		.index = idx,
> +		.nesting_parent_iova = base_addr,
> +		.length = length,
> +	};
> +	int ret;
> +
> +	ret = ioctl(fd, IOMMU_HW_QUEUE_ALLOC, &cmd);
> +	if (ret)
> +		return ret;
> +	if (hw_queue_id)
> +		*hw_queue_id = cmd.out_hw_queue_id;
> +	return 0;
> +}
> +
> +#define test_cmd_hw_queue_alloc(viommu_id, type, idx, base_addr, len, out_qid) \
> +	ASSERT_EQ(0, _test_cmd_hw_queue_alloc(self->fd, viommu_id, type, idx,  \
> +					      base_addr, len, out_qid))
> +#define test_err_hw_queue_alloc(_errno, viommu_id, type, idx, base_addr, len, \
> +				out_qid)                                      \
> +	EXPECT_ERRNO(_errno,                                                  \
> +		     _test_cmd_hw_queue_alloc(self->fd, viommu_id, type, idx, \
> +					      base_addr, len, out_qid))
> +
>  static int _test_cmd_veventq_alloc(int fd, __u32 viommu_id, __u32 type,
>  				   __u32 *veventq_id, __u32 *veventq_fd)
>  {
> diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
> index 38066dfeb2e7..2189e9b119ee 100644
> --- a/drivers/iommu/iommufd/selftest.c
> +++ b/drivers/iommu/iommufd/selftest.c
> @@ -150,6 +150,8 @@ to_mock_nested(struct iommu_domain *domain)
>  struct mock_viommu {
>  	struct iommufd_viommu core;
>  	struct mock_iommu_domain *s2_parent;
> +	struct mock_hw_queue *hw_queue[IOMMU_TEST_HW_QUEUE_MAX];
> +	struct mutex queue_mutex;
>  };
>  
>  static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)
> @@ -157,6 +159,19 @@ static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)
>  	return container_of(viommu, struct mock_viommu, core);
>  }
>  
> +struct mock_hw_queue {
> +	struct iommufd_hw_queue core;
> +	struct mock_viommu *mock_viommu;
> +	struct mock_hw_queue *prev;
> +	u16 index;
> +};
> +
> +static inline struct mock_hw_queue *
> +to_mock_hw_queue(struct iommufd_hw_queue *hw_queue)
> +{
> +	return container_of(hw_queue, struct mock_hw_queue, core);
> +}
> +
>  enum selftest_obj_type {
>  	TYPE_IDEV,
>  };
> @@ -670,9 +685,11 @@ static void mock_viommu_destroy(struct iommufd_viommu *viommu)
>  {
>  	struct mock_iommu_device *mock_iommu = container_of(
>  		viommu->iommu_dev, struct mock_iommu_device, iommu_dev);
> +	struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
>  
>  	if (refcount_dec_and_test(&mock_iommu->users))
>  		complete(&mock_iommu->complete);
> +	mutex_destroy(&mock_viommu->queue_mutex);
>  
>  	/* iommufd core frees mock_viommu and viommu */
>  }
> @@ -764,10 +781,86 @@ static int mock_viommu_cache_invalidate(struct iommufd_viommu *viommu,
>  	return rc;
>  }
>  
> +static size_t mock_viommu_get_hw_queue_size(struct iommufd_viommu *viommu,
> +					    enum iommu_hw_queue_type queue_type)
> +{
> +	if (queue_type != IOMMU_HW_QUEUE_TYPE_SELFTEST)
> +		return 0;
> +	return HW_QUEUE_STRUCT_SIZE(struct mock_hw_queue, core);
> +}
> +
> +static void mock_hw_queue_destroy(struct iommufd_hw_queue *hw_queue)
> +{
> +	struct mock_hw_queue *mock_hw_queue = to_mock_hw_queue(hw_queue);
> +	struct mock_viommu *mock_viommu = mock_hw_queue->mock_viommu;
> +
> +	mutex_lock(&mock_viommu->queue_mutex);
> +	mock_viommu->hw_queue[mock_hw_queue->index] = NULL;
> +	if (mock_hw_queue->prev)
> +		iommufd_hw_queue_undepend(mock_hw_queue, mock_hw_queue->prev,
> +					  core);
> +	mutex_unlock(&mock_viommu->queue_mutex);
> +}
> +
> +/* Test iommufd_hw_queue_depend/undepend() */
> +static int mock_hw_queue_init_phys(struct iommufd_hw_queue *hw_queue, u32 index,
> +				   phys_addr_t base_addr_pa)
> +{
> +	struct mock_viommu *mock_viommu = to_mock_viommu(hw_queue->viommu);
> +	struct mock_hw_queue *mock_hw_queue = to_mock_hw_queue(hw_queue);
> +	struct mock_hw_queue *prev = NULL;
> +	int rc = 0;
> +
> +	if (index >= IOMMU_TEST_HW_QUEUE_MAX)
> +		return -EINVAL;
> +
> +	mutex_lock(&mock_viommu->queue_mutex);
> +
> +	if (mock_viommu->hw_queue[index]) {
> +		rc = -EEXIST;
> +		goto unlock;
> +	}
> +
> +	if (index) {
> +		prev = mock_viommu->hw_queue[index - 1];
> +		if (!prev) {
> +			rc = -EIO;
> +			goto unlock;
> +		}
> +	}
> +
> +	/*
> +	 * Test to catch a kernel bug where the core converted the physical address
> +	 * incorrectly. Let mock_domain_iova_to_phys() WARN_ON if it fails.
> +	 */
> +	if (base_addr_pa != iommu_iova_to_phys(&mock_viommu->s2_parent->domain,
> +					       hw_queue->base_addr)) {
> +		rc = -EFAULT;
> +		goto unlock;
> +	}
> +
> +	if (prev) {
> +		rc = iommufd_hw_queue_depend(mock_hw_queue, prev, core);
> +		if (rc)
> +			goto unlock;
> +	}
> +
> +	mock_hw_queue->prev = prev;
> +	mock_hw_queue->mock_viommu = mock_viommu;
> +	mock_viommu->hw_queue[index] = mock_hw_queue;
> +
> +	hw_queue->destroy = &mock_hw_queue_destroy;
> +unlock:
> +	mutex_unlock(&mock_viommu->queue_mutex);
> +	return rc;
> +}
> +
>  static struct iommufd_viommu_ops mock_viommu_ops = {
>  	.destroy = mock_viommu_destroy,
>  	.alloc_domain_nested = mock_viommu_alloc_domain_nested,
>  	.cache_invalidate = mock_viommu_cache_invalidate,
> +	.get_hw_queue_size = mock_viommu_get_hw_queue_size,
> +	.hw_queue_init_phys = mock_hw_queue_init_phys,
>  };
>  
>  static size_t mock_get_viommu_size(struct device *dev,
> @@ -784,6 +877,7 @@ static int mock_viommu_init(struct iommufd_viommu *viommu,
>  {
>  	struct mock_iommu_device *mock_iommu = container_of(
>  		viommu->iommu_dev, struct mock_iommu_device, iommu_dev);
> +	struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
>  	struct iommu_viommu_selftest data;
>  	int rc;
>  
> @@ -801,6 +895,9 @@ static int mock_viommu_init(struct iommufd_viommu *viommu,
>  	}
>  
>  	refcount_inc(&mock_iommu->users);
> +	mutex_init(&mock_viommu->queue_mutex);
> +	mock_viommu->s2_parent = to_mock_domain(parent_domain);
> +
>  	viommu->ops = &mock_viommu_ops;
>  	return 0;
>  }
> diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
> index a9dfcce5e1b2..73426de77675 100644
> --- a/tools/testing/selftests/iommu/iommufd.c
> +++ b/tools/testing/selftests/iommu/iommufd.c
> @@ -3032,6 +3032,65 @@ TEST_F(iommufd_viommu, vdevice_cache)
>  	}
>  }
>  
> +TEST_F(iommufd_viommu, hw_queue)
> +{
> +	__u64 iova = MOCK_APERTURE_START, iova2;
> +	uint32_t viommu_id = self->viommu_id;
> +	uint32_t hw_queue_id[2];
> +
> +	if (!viommu_id)
> +		SKIP(return, "Skipping test for variant no_viommu");
> +
> +	/* Fail IOMMU_HW_QUEUE_TYPE_DEFAULT */
> +	test_err_hw_queue_alloc(EOPNOTSUPP, viommu_id,
> +				IOMMU_HW_QUEUE_TYPE_DEFAULT, 0, iova, PAGE_SIZE,
> +				&hw_queue_id[0]);
> +	/* Fail queue addr and length */
> +	test_err_hw_queue_alloc(EINVAL, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
> +				0, iova, 0, &hw_queue_id[0]);
> +	test_err_hw_queue_alloc(EOVERFLOW, viommu_id,
> +				IOMMU_HW_QUEUE_TYPE_SELFTEST, 0, ~(uint64_t)0,
> +				PAGE_SIZE, &hw_queue_id[0]);
> +	/* Fail missing iova */
> +	test_err_hw_queue_alloc(ENOENT, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
> +				0, iova, PAGE_SIZE, &hw_queue_id[0]);
> +
> +	/* Map iova */
> +	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
> +	test_ioctl_ioas_map(buffer + PAGE_SIZE, PAGE_SIZE, &iova2);
> +
> +	/* Fail index=1 and =MAX; must start from index=0 */
> +	test_err_hw_queue_alloc(EIO, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 1,
> +				iova, PAGE_SIZE, &hw_queue_id[0]);
> +	test_err_hw_queue_alloc(EINVAL, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
> +				IOMMU_TEST_HW_QUEUE_MAX, iova, PAGE_SIZE,
> +				&hw_queue_id[0]);
> +
> +	/* Allocate index=0, declare ownership of the iova */
> +	test_cmd_hw_queue_alloc(viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 0,
> +				iova, PAGE_SIZE, &hw_queue_id[0]);
> +	/* Fail duplicated index */
> +	test_err_hw_queue_alloc(EEXIST, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
> +				0, iova, PAGE_SIZE, &hw_queue_id[0]);
> +	/* Fail unmap, due to iova ownership */
> +	test_err_ioctl_ioas_unmap(EBUSY, iova, PAGE_SIZE);
> +	/* The 2nd page is not pinned, so it can be unmapped */
> +	test_ioctl_ioas_unmap(iova2, PAGE_SIZE);
> +
> +	/* Allocate index=1, with an unaligned case */
> +	test_cmd_hw_queue_alloc(viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 1,
> +				iova + PAGE_SIZE / 2, PAGE_SIZE / 2,
> +				&hw_queue_id[1]);
> +	/* Fail to destroy, due to dependency */
> +	EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hw_queue_id[0]));
> +
> +	/* Destroy in descending order */
> +	test_ioctl_destroy(hw_queue_id[1]);
> +	test_ioctl_destroy(hw_queue_id[0]);
> +	/* Now it can unmap the first page */
> +	test_ioctl_ioas_unmap(iova, PAGE_SIZE);
> +}
> +
>  FIXTURE(iommufd_device_pasid)
>  {
>  	int fd;
> diff --git a/tools/testing/selftests/iommu/iommufd_fail_nth.c b/tools/testing/selftests/iommu/iommufd_fail_nth.c
> index f7ccf1822108..41c685bbd252 100644
> --- a/tools/testing/selftests/iommu/iommufd_fail_nth.c
> +++ b/tools/testing/selftests/iommu/iommufd_fail_nth.c
> @@ -634,6 +634,7 @@ TEST_FAIL_NTH(basic_fail_nth, device)
>  	uint32_t idev_id;
>  	uint32_t hwpt_id;
>  	uint32_t viommu_id;
> +	uint32_t hw_queue_id;
>  	uint32_t vdev_id;
>  	__u64 iova;
>  
> @@ -696,6 +697,11 @@ TEST_FAIL_NTH(basic_fail_nth, device)
>  	if (_test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id, 0, &vdev_id))
>  		return -1;
>  
> +	if (_test_cmd_hw_queue_alloc(self->fd, viommu_id,
> +				     IOMMU_HW_QUEUE_TYPE_SELFTEST, 0, iova,
> +				     PAGE_SIZE, &hw_queue_id))
> +		return -1;
> +
>  	if (_test_ioctl_fault_alloc(self->fd, &fault_id, &fault_fd))
>  		return -1;
>  	close(fault_fd);
> -- 
> 2.43.0
> 
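
One more note for anyone wanting to poke at this outside the harness: the
raw uAPI call that _test_cmd_hw_queue_alloc() above wraps is just the
following (IOMMU_HW_QUEUE_TYPE_SELFTEST comes from iommufd_test.h; struct
iommu_hw_queue_alloc and IOMMU_HW_QUEUE_ALLOC are from the iommufd uAPI
added elsewhere in this series, so this is a sketch of the call, not new
interface):

struct iommu_hw_queue_alloc cmd = {
	.size = sizeof(cmd),
	.viommu_id = viommu_id,
	.type = IOMMU_HW_QUEUE_TYPE_SELFTEST,
	.index = 0,
	.nesting_parent_iova = iova,
	.length = PAGE_SIZE,
};

if (!ioctl(fd, IOMMU_HW_QUEUE_ALLOC, &cmd))
	hw_queue_id = cmd.out_hw_queue_id;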
