Date:   Wed, 21 Aug 2019 14:16:40 +1000
From:   Stephen Rothwell <sfr@...b.auug.org.au>
To:     Joerg Roedel <joro@...tes.org>,
        Daniel Vetter <daniel.vetter@...ll.ch>,
        Intel Graphics <intel-gfx@...ts.freedesktop.org>,
        DRI <dri-devel@...ts.freedesktop.org>
Cc:     Linux Next Mailing List <linux-next@...r.kernel.org>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
        Rob Herring <robh@...nel.org>, Will Deacon <will@...nel.org>
Subject: linux-next: manual merge of the iommu tree with the drm-misc tree

Hi all,

Today's linux-next merge of the iommu tree got a conflict in:

  drivers/gpu/drm/panfrost/panfrost_mmu.c

between commit:

  187d2929206e ("drm/panfrost: Add support for GPU heap allocations")

from the drm-misc tree and commit:

  a2d3a382d6c6 ("iommu/io-pgtable: Pass struct iommu_iotlb_gather to ->unmap()")

from the iommu tree.
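For context, that iommu commit changes the io-pgtable ->unmap() callback
to take an extra struct iommu_iotlb_gather pointer, which is why the new
unmap call added on the drm-misc side needs a fourth argument in the
resolution below. Roughly (a paraphrased sketch, not copied verbatim from
the iommu tree):

  /* before a2d3a382d6c6 */
  size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
                  size_t size);

  /* after a2d3a382d6c6: callers that do not batch TLB invalidations,
   * like panfrost here, can simply pass NULL for the gather argument */
  size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
                  size_t size, struct iommu_iotlb_gather *gather);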

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc drivers/gpu/drm/panfrost/panfrost_mmu.c
index 842bdd7cf6be,6e8145c36e93..000000000000
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@@ -310,18 -222,18 +310,18 @@@ void panfrost_mmu_unmap(struct panfrost
  		size_t unmapped_page;
  		size_t pgsize = get_pgsize(iova, len - unmapped_len);
  
 -		unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
 -		if (!unmapped_page)
 -			break;
 -
 -		iova += unmapped_page;
 -		unmapped_len += unmapped_page;
 +		if (ops->iova_to_phys(ops, iova)) {
- 			unmapped_page = ops->unmap(ops, iova, pgsize);
++			unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
 +			WARN_ON(unmapped_page != pgsize);
 +		}
 +		iova += pgsize;
 +		unmapped_len += pgsize;
  	}
  
 -	mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
 +	mmu_hw_do_operation(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT,
  			    bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
  
 -	mutex_unlock(&pfdev->mmu->lock);
 +	mutex_unlock(&bo->mmu->lock);
  
  	pm_runtime_mark_last_busy(pfdev->dev);
  	pm_runtime_put_autosuspend(pfdev->dev);
@@@ -330,184 -242,35 +330,192 @@@
  
  static void mmu_tlb_inv_context_s1(void *cookie)
  {
 -	struct panfrost_device *pfdev = cookie;
 +	struct panfrost_file_priv *priv = cookie;
  
 -	mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
 +	mmu_hw_do_operation(priv->pfdev, &priv->mmu, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
  }
  
- static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
- 				     size_t granule, bool leaf, void *cookie)
- {}
- 
  static void mmu_tlb_sync_context(void *cookie)
  {
  	//struct panfrost_device *pfdev = cookie;
  	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
  }
  
- static const struct iommu_gather_ops mmu_tlb_ops = {
+ static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
+ 			       void *cookie)
+ {
+ 	mmu_tlb_sync_context(cookie);
+ }
+ 
+ static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule,
+ 			       void *cookie)
+ {
+ 	mmu_tlb_sync_context(cookie);
+ }
+ 
+ static const struct iommu_flush_ops mmu_tlb_ops = {
  	.tlb_flush_all	= mmu_tlb_inv_context_s1,
- 	.tlb_add_flush	= mmu_tlb_inv_range_nosync,
- 	.tlb_sync	= mmu_tlb_sync_context,
+ 	.tlb_flush_walk = mmu_tlb_flush_walk,
+ 	.tlb_flush_leaf = mmu_tlb_flush_leaf,
  };
  
 +int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
 +{
 +	struct panfrost_mmu *mmu = &priv->mmu;
 +	struct panfrost_device *pfdev = priv->pfdev;
 +
 +	mutex_init(&mmu->lock);
 +	INIT_LIST_HEAD(&mmu->list);
 +	mmu->as = -1;
 +
 +	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
 +		.pgsize_bitmap	= SZ_4K | SZ_2M,
 +		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
 +		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
 +		.tlb		= &mmu_tlb_ops,
 +		.iommu_dev	= pfdev->dev,
 +	};
 +
 +	mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
 +					      priv);
 +	if (!mmu->pgtbl_ops)
 +		return -EINVAL;
 +
 +	return 0;
 +}
 +
 +void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
 +{
 +	struct panfrost_device *pfdev = priv->pfdev;
 +	struct panfrost_mmu *mmu = &priv->mmu;
 +
 +	spin_lock(&pfdev->as_lock);
 +	if (mmu->as >= 0) {
 +		clear_bit(mmu->as, &pfdev->as_alloc_mask);
 +		clear_bit(mmu->as, &pfdev->as_in_use_mask);
 +		list_del(&mmu->list);
 +	}
 +	spin_unlock(&pfdev->as_lock);
 +
 +	free_io_pgtable_ops(mmu->pgtbl_ops);
 +}
 +
 +static struct drm_mm_node *addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
 +{
 +	struct drm_mm_node *node = NULL;
 +	u64 offset = addr >> PAGE_SHIFT;
 +	struct panfrost_mmu *mmu;
 +
 +	spin_lock(&pfdev->as_lock);
 +	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
 +		struct panfrost_file_priv *priv;
 +		if (as != mmu->as)
 +			continue;
 +
 +		priv = container_of(mmu, struct panfrost_file_priv, mmu);
 +		drm_mm_for_each_node(node, &priv->mm) {
 +			if (offset >= node->start && offset < (node->start + node->size))
 +				goto out;
 +		}
 +	}
 +
 +out:
 +	spin_unlock(&pfdev->as_lock);
 +	return node;
 +}
 +
 +#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
 +
 +int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
 +{
 +	int ret, i;
 +	struct drm_mm_node *node;
 +	struct panfrost_gem_object *bo;
 +	struct address_space *mapping;
 +	pgoff_t page_offset;
 +	struct sg_table *sgt;
 +	struct page **pages;
 +
 +	node = addr_to_drm_mm_node(pfdev, as, addr);
 +	if (!node)
 +		return -ENOENT;
 +
 +	bo = drm_mm_node_to_panfrost_bo(node);
 +	if (!bo->is_heap) {
 +		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
 +			 node->start << PAGE_SHIFT);
 +		return -EINVAL;
 +	}
 +	WARN_ON(bo->mmu->as != as);
 +
 +	/* Assume 2MB alignment and size multiple */
 +	addr &= ~((u64)SZ_2M - 1);
 +	page_offset = addr >> PAGE_SHIFT;
 +	page_offset -= node->start;
 +
 +	mutex_lock(&bo->base.pages_lock);
 +
 +	if (!bo->base.pages) {
 +		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
 +				     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
 +		if (!bo->sgts) {
 +			mutex_unlock(&bo->base.pages_lock);
 +			return -ENOMEM;
 +		}
 +
 +		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
 +				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
 +		if (!pages) {
 +			kfree(bo->sgts);
 +			bo->sgts = NULL;
 +			mutex_unlock(&bo->base.pages_lock);
 +			return -ENOMEM;
 +		}
 +		bo->base.pages = pages;
 +		bo->base.pages_use_count = 1;
 +	} else
 +		pages = bo->base.pages;
 +
 +	mapping = bo->base.base.filp->f_mapping;
 +	mapping_set_unevictable(mapping);
 +
 +	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
 +		pages[i] = shmem_read_mapping_page(mapping, i);
 +		if (IS_ERR(pages[i])) {
 +			mutex_unlock(&bo->base.pages_lock);
 +			ret = PTR_ERR(pages[i]);
 +			goto err_pages;
 +		}
 +	}
 +
 +	mutex_unlock(&bo->base.pages_lock);
 +
 +	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
 +	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
 +					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
 +	if (ret)
 +		goto err_pages;
 +
 +	if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
 +		ret = -EINVAL;
 +		goto err_map;
 +	}
 +
 +	mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
 +
 +	bo->is_mapped = true;
 +
 +	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
 +
 +	return 0;
 +
 +err_map:
 +	sg_free_table(sgt);
 +err_pages:
 +	drm_gem_shmem_put_pages(&bo->base);
 +	return ret;
 +}
 +
  static const char *access_type_name(struct panfrost_device *pfdev,
  		u32 fault_status)
  {
