Message-ID: <aKjKJhblxtnlrrPG@Asurada-Nvidia>
Date: Fri, 22 Aug 2025 12:51:02 -0700
From: Nicolin Chen <nicolinc@...dia.com>
To: Suravee Suthikulpanit <suravee.suthikulpanit@....com>
CC: <jgg@...dia.com>, <linux-kernel@...r.kernel.org>, <robin.murphy@....com>,
<will@...nel.org>, <joro@...tes.org>, <kevin.tian@...el.com>,
<jsnitsel@...hat.com>, <vasant.hegde@....com>, <iommu@...ts.linux.dev>,
<santosh.shukla@....com>, <sairaj.arunkodilkar@....com>, <jon.grimm@....com>,
<prashanthpra@...gle.com>, <wvw@...gle.com>, <wnliu@...gle.com>,
<gptran@...gle.com>, <kpsingh@...gle.com>
Subject: Re: [PATCH 7/8] iommu/amd: Add support for nested domain allocation
On Wed, Aug 20, 2025 at 11:30:08AM +0000, Suravee Suthikulpanit wrote:
> diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
> index 46682c8ba28d..ea790a8997ee 100644
> --- a/drivers/iommu/amd/iommu.c
> +++ b/drivers/iommu/amd/iommu.c
> @@ -2616,6 +2616,7 @@ amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
> const struct iommu_user_data *user_data)
>
> {
> + struct iommu_domain *dom = ERR_PTR(-EOPNOTSUPP);
> struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
> struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
> const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
> @@ -2626,29 +2627,31 @@ amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
> if ((flags & ~supported_flags) || user_data || !is_nest_parent_supported(flags))
> return ERR_PTR(-EOPNOTSUPP);
>
> - pr_debug("%s: IOMMU devid=%#x, flags=%#x\n", __func__, dev_data->devid, flags);
> + pr_debug("%s: IOMMU devid=%#x, flags=%#x, supported_flags=%#x\n", __func__, dev_data->devid, flags, supported_flags);
>
> switch (flags & supported_flags) {
> case IOMMU_HWPT_ALLOC_DIRTY_TRACKING:
> case IOMMU_HWPT_ALLOC_DIRTY_TRACKING | IOMMU_HWPT_ALLOC_NEST_PARENT:
> case IOMMU_HWPT_ALLOC_NEST_PARENT:
> /* Allocate domain with v1 page table for dirty tracking */
> - if (!amd_iommu_hd_support(iommu))
> - break;
> - return do_iommu_domain_alloc(dev, flags, PD_MODE_V1);
> + if (amd_iommu_hd_support(iommu))
> + dom = do_iommu_domain_alloc(dev, flags, PD_MODE_V1);
> + break;
> case IOMMU_HWPT_ALLOC_PASID:
> /* Allocate domain with v2 page table if IOMMU supports PASID. */
> - if (!amd_iommu_pasid_supported())
> - break;
> - return do_iommu_domain_alloc(dev, flags, PD_MODE_V2);
> + if (amd_iommu_pasid_supported())
> + dom = do_iommu_domain_alloc(dev, flags, PD_MODE_V2);
> + break;
> case 0:
> /* If nothing specific is required use the kernel commandline default */
> - return do_iommu_domain_alloc(dev, 0, amd_iommu_pgtable);
> + dom = do_iommu_domain_alloc(dev, 0, amd_iommu_pgtable);
> + break;
> default:
> pr_err("%s: Unhandled flag : 0x%x\n", __func__, flags);
> break;
> }
> - return ERR_PTR(-EOPNOTSUPP);
> +
> + return dom;
These changes look better suited to a separate preparatory patch.
> @@ -3113,6 +3116,7 @@ const struct iommu_ops amd_iommu_ops = {
> .release_domain = &release_domain,
> .identity_domain = &identity_domain.domain,
> .domain_alloc_paging_flags = amd_iommu_domain_alloc_paging_flags,
> + .domain_alloc_nested = amd_iommu_domain_alloc_nested,
> .domain_alloc_sva = amd_iommu_domain_alloc_sva,
> .probe_device = amd_iommu_probe_device,
> .release_device = amd_iommu_release_device,
This adds HWPT-based nesting support, as opposed to vIOMMU-based. If
AMD wants to enable its Command/Event Buffers, I think this should
follow the vIOMMU model instead.
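For reference, in the vIOMMU model the nested domain allocation hangs
off a vIOMMU object via iommufd_viommu_ops, rather than off iommu_ops.
Roughly (a sketch following the arm-smmu-v3 pattern; the amd_* names
here are hypothetical):

	static const struct iommufd_viommu_ops amd_viommu_ops = {
		.alloc_domain_nested = amd_viommu_alloc_domain_nested,
		/*
		 * Command/Event Buffer emulation would hook in here
		 * too, e.g. a cache_invalidate op for the guest
		 * Command Buffer.
		 */
	};

That also gives the driver a natural place to keep per-guest-IOMMU
state.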
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * Copyright (C) 2023 Advanced Micro Devices, Inc.
> + */
> +
> +#define pr_fmt(fmt) "AMD-Vi: " fmt
> +#define dev_fmt(fmt) pr_fmt(fmt)
> +
> +#include <linux/iommu.h>
> +#include <uapi/linux/iommufd.h>
> +
> +#include "amd_iommu.h"
> +#include "amd_iommu_types.h"
It seems that you already include the uapi header in "amd_iommu.h", so
this include is redundant.
> +static int udata_to_iommu_hwpt_amd_v2(const struct iommu_user_data *user_data,
> + struct iommu_hwpt_amd_v2 *hwpt)
> +{
> + if (!user_data)
> + return -EINVAL;
> +
> + if (user_data->type != IOMMU_HWPT_DATA_AMD_V2)
> + return -EOPNOTSUPP;
> +
iommu_copy_struct_from_user() already performs both of these checks
internally, so they can be dropped here.
> + return iommu_copy_struct_from_user(hwpt, user_data,
> + IOMMU_HWPT_DATA_AMD_V2,
> + dte);
> +}
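With those two checks gone, the wrapper doesn't buy much. The caller
could simply do (sketch):

	ret = iommu_copy_struct_from_user(&hwpt, user_data,
					  IOMMU_HWPT_DATA_AMD_V2, dte);
	if (ret)
		return ERR_PTR(ret);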
> +
> +struct iommu_domain *
> +amd_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
> + u32 flags, const struct iommu_user_data *user_data)
> +{
> + int ret;
> + struct iommu_hwpt_amd_v2 hwpt;
> + struct protection_domain *pdom;
> +
> + if (parent->ops != amd_iommu_ops.default_domain_ops)
> + return ERR_PTR(-EINVAL);
> +
> + ret = udata_to_iommu_hwpt_amd_v2(user_data, &hwpt);
> + if (ret)
> + return ERR_PTR(ret);
> +
> + pdom = kzalloc(sizeof(*pdom), GFP_KERNEL);
> + if (IS_ERR(pdom))
> + return ERR_PTR(-ENOMEM);
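Also, kzalloc() returns NULL on failure, not an ERR_PTR, so this
IS_ERR() check can never fire. It should be:

	pdom = kzalloc(sizeof(*pdom), GFP_KERNEL);
	if (!pdom)
		return ERR_PTR(-ENOMEM);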
> +
> + pdom->id = amd_iommu_pdom_id_alloc();
> + if (!pdom->id)
> + goto out_err;
This seems incorrect: amd_iommu_pdom_id_alloc() is a wrapper around
ida_alloc_range(), which returns -ENOMEM or -ENOSPC on failure, so a
negative errno would not be caught by "!pdom->id". Also, the -EINVAL
in out_err isn't a nice replacement for either of them. So, I think
this should be:

	if (pdom->id <= 0) {
		ret = pdom->id;
		goto out_err;
	}
> +
> + pr_debug("%s: Allocating nested domain with parent domid=%#x\n",
> + __func__, to_pdomain(parent)->id);
> +
> + spin_lock_init(&pdom->lock);
> + INIT_LIST_HEAD(&pdom->dev_list);
> + INIT_LIST_HEAD(&pdom->dev_data_list);
> + xa_init(&pdom->iommu_array);
> +
> + pdom->pd_mode = PD_MODE_V2;
> + pdom->iop.pgtbl.cfg.amd.nid = NUMA_NO_NODE;
> + pdom->parent = to_pdomain(parent);
> + pdom->domain.ops = &nested_domain_ops;
> + pdom->domain.type = IOMMU_DOMAIN_NESTED;
> + pdom->domain.geometry.aperture_start = 0;
> + pdom->domain.geometry.aperture_end = ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1);
> + pdom->domain.geometry.force_aperture = true;
> + pdom->domain.pgsize_bitmap = pdom->iop.pgtbl.cfg.pgsize_bitmap;
> + memcpy(&pdom->guest_hwpt, &hwpt, sizeof(struct iommu_hwpt_amd_v2));
How about just holding a "struct dev_table_entry guest_dte" in the
pdom, instead of the uAPI structure?
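I.e. something like this (a sketch; it assumes the uAPI struct carries
the DTE in the "dte" member that the iommu_copy_struct_from_user()
call above refers to):

	/* in struct protection_domain */
	struct dev_table_entry guest_dte;

	/* in amd_iommu_domain_alloc_nested() */
	memcpy(&pdom->guest_dte, &hwpt.dte, sizeof(pdom->guest_dte));

That keeps the uAPI structure out of the driver's internal state.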
Nicolin