lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <86dbf286-bb0b-4beb-b26f-a74562b0ace8@intel.com>
Date: Fri, 28 Jun 2024 13:42:40 +0800
From: Yi Liu <yi.l.liu@...el.com>
To: Lu Baolu <baolu.lu@...ux.intel.com>, Joerg Roedel <joro@...tes.org>, "Will
 Deacon" <will@...nel.org>, Robin Murphy <robin.murphy@....com>, "Jason
 Gunthorpe" <jgg@...pe.ca>, Kevin Tian <kevin.tian@...el.com>
CC: David Airlie <airlied@...il.com>, Daniel Vetter <daniel@...ll.ch>, "Kalle
 Valo" <kvalo@...nel.org>, Bjorn Andersson <andersson@...nel.org>, "Mathieu
 Poirier" <mathieu.poirier@...aro.org>, Alex Williamson
	<alex.williamson@...hat.com>, <mst@...hat.com>, Jason Wang
	<jasowang@...hat.com>, Thierry Reding <thierry.reding@...il.com>, "Jonathan
 Hunter" <jonathanh@...dia.com>, Mikko Perttunen <mperttunen@...dia.com>,
	"Jeff Johnson" <quic_jjohnson@...cinc.com>, <ath10k@...ts.infradead.org>,
	<ath11k@...ts.infradead.org>, <iommu@...ts.linux.dev>,
	<dri-devel@...ts.freedesktop.org>, <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v3 15/21] iommu/vt-d: Add helper to allocate paging domain

On 2024/6/10 16:55, Lu Baolu wrote:
> The domain_alloc_user operation is currently implemented by allocating a
> paging domain using iommu_domain_alloc(). This is because it needs to fully
> initialize the domain before return. Add a helper to do this to avoid using
> iommu_domain_alloc().
> 
> Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
> ---
>   drivers/iommu/intel/iommu.c | 87 +++++++++++++++++++++++++++++++++----
>   1 file changed, 78 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
> index 2e9811bf2a4e..ccde5f5972e4 100644
> --- a/drivers/iommu/intel/iommu.c
> +++ b/drivers/iommu/intel/iommu.c
> @@ -3633,6 +3633,79 @@ static struct iommu_domain blocking_domain = {
>   	}
>   };
>   
> +static int iommu_superpage_capability(struct intel_iommu *iommu, bool first_stage)
> +{
> +	if (!intel_iommu_superpage)
> +		return 0;
> +
> +	if (first_stage)
> +		return cap_fl1gp_support(iommu->cap) ? 2 : 1;
> +
> +	return fls(cap_super_page_val(iommu->cap));
> +}
> +
> +static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_stage)
> +{
> +	struct device_domain_info *info = dev_iommu_priv_get(dev);
> +	struct intel_iommu *iommu = info->iommu;
> +	struct dmar_domain *domain;
> +	int addr_width;
> +
> +	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
> +	if (!domain)
> +		return ERR_PTR(-ENOMEM);
> +
> +	INIT_LIST_HEAD(&domain->devices);
> +	INIT_LIST_HEAD(&domain->dev_pasids);
> +	INIT_LIST_HEAD(&domain->cache_tags);
> +	spin_lock_init(&domain->lock);
> +	spin_lock_init(&domain->cache_lock);
> +	xa_init(&domain->iommu_array);
> +
> +	domain->nid = dev_to_node(dev);
> +	domain->has_iotlb_device = info->ats_enabled;
> +	domain->use_first_level = first_stage;
> +
> +	/* calculate the address width */
> +	addr_width = agaw_to_width(iommu->agaw);
> +	if (addr_width > cap_mgaw(iommu->cap))
> +		addr_width = cap_mgaw(iommu->cap);
> +	domain->gaw = addr_width;
> +	domain->agaw = iommu->agaw;
> +	domain->max_addr = __DOMAIN_MAX_ADDR(addr_width);
> +
> +	/* iommu memory access coherency */
> +	domain->iommu_coherency = iommu_paging_structure_coherency(iommu);
> +
> +	/* pagesize bitmap */
> +	domain->domain.pgsize_bitmap = SZ_4K;
> +	domain->iommu_superpage = iommu_superpage_capability(iommu, first_stage);
> +	domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain);
> +
> +	/*
> +	 * IOVA aperture: First-level translation restricts the input-address
> +	 * to a canonical address (i.e., address bits 63:N have the same value
> +	 * as address bit [N-1], where N is 48-bits with 4-level paging and
> +	 * 57-bits with 5-level paging). Hence, skip bit [N-1].
> +	 */
> +	domain->domain.geometry.force_aperture = true;
> +	domain->domain.geometry.aperture_start = 0;
> +	if (first_stage)
> +		domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
> +	else
> +		domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
> +
> +	/* always allocate the top pgd */
> +	domain->pgd = iommu_alloc_page_node(domain->nid, GFP_KERNEL);
> +	if (!domain->pgd) {
> +		kfree(domain);
> +		return ERR_PTR(-ENOMEM);
> +	}
> +	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
> +
> +	return domain;
> +}
> +
>   static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
>   {
>   	struct dmar_domain *dmar_domain;
> @@ -3695,15 +3768,11 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
>   	if (user_data || (dirty_tracking && !ssads_supported(iommu)))
>   		return ERR_PTR(-EOPNOTSUPP);
>   
> -	/*
> -	 * domain_alloc_user op needs to fully initialize a domain before
> -	 * return, so uses iommu_domain_alloc() here for simple.
> -	 */
> -	domain = iommu_domain_alloc(dev->bus);
> -	if (!domain)
> -		return ERR_PTR(-ENOMEM);
> -
> -	dmar_domain = to_dmar_domain(domain);
> +	/* Do not use first stage for user domain translation. */
> +	dmar_domain = paging_domain_alloc(dev, false);

this is not an apples-to-apples replacement yet. You need to set the type,
owner, and domain->ops as well.

> +	if (IS_ERR(dmar_domain))
> +		return ERR_CAST(dmar_domain);
> +	domain = &dmar_domain->domain;
>   
>   	if (nested_parent) {
>   		dmar_domain->nested_parent = true;

-- 
Regards,
Yi Liu

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ