Message-ID: <5de68173-5673-f834-c2aa-ec8cffc6702a@deltatee.com>
Date:   Wed, 13 Feb 2019 10:57:25 -0700
From:   Logan Gunthorpe <logang@...tatee.com>
To:     linux-kernel@...r.kernel.org, linux-ntb@...glegroups.com,
        linux-pci@...r.kernel.org, iommu@...ts.linux-foundation.org,
        linux-kselftest@...r.kernel.org, Jon Mason <jdmason@...zu.us>,
        Bjorn Helgaas <bhelgaas@...gle.com>,
        Joerg Roedel <joro@...tes.org>
Cc:     Allen Hubbe <allenbh@...il.com>, Dave Jiang <dave.jiang@...el.com>,
        Serge Semin <fancer.lancer@...il.com>,
        Eric Pilmore <epilmore@...aio.com>,
        David Woodhouse <dwmw2@...radead.org>
Subject: Re: [PATCH v2 01/12] iommu/vt-d: Implement dma_[un]map_resource()

Oops, sorry. Please ignore the first two patches in this series. They
have already been merged independently.

Logan



On 2019-02-13 10:54 a.m., Logan Gunthorpe wrote:
> Currently the Intel IOMMU uses the default dma_[un]map_resource()
> implementations, which do nothing and simply return the physical
> address unmodified.
> 
> However, this doesn't create the IOVA entries necessary for addresses
> mapped this way to work once the IOMMU is enabled. Thus, drivers
> relying on dma_map_resource() will trigger DMAR errors in that
> configuration. We see this when running ntb_transport on switchtec
> hardware with the IOMMU enabled and a DMA engine in use.
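> 
> A minimal sketch of the calling pattern that hits this path (the
> helpers and the peer_bar_phys name are illustrative, not taken from
> this series):
> 
> 	#include <linux/dma-mapping.h>
> 
> 	/*
> 	 * Map a peer device's MMIO region (e.g. a BAR used for P2P
> 	 * DMA) so a DMA engine behind the IOMMU can reach it. Without
> 	 * this patch, the VT-d driver leaves the physical address
> 	 * untouched, no IOVA entry is created, and the DMA engine
> 	 * faults with a DMAR error.
> 	 */
> 	static dma_addr_t map_peer_mmio(struct device *dma_dev,
> 					phys_addr_t peer_bar_phys,
> 					size_t size)
> 	{
> 		dma_addr_t addr;
> 
> 		addr = dma_map_resource(dma_dev, peer_bar_phys, size,
> 					DMA_BIDIRECTIONAL, 0);
> 		if (dma_mapping_error(dma_dev, addr))
> 			return DMA_MAPPING_ERROR;
> 
> 		return addr;
> 	}
> 
> 	static void unmap_peer_mmio(struct device *dma_dev,
> 				    dma_addr_t addr, size_t size)
> 	{
> 		dma_unmap_resource(dma_dev, addr, size,
> 				   DMA_BIDIRECTIONAL, 0);
> 	}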
> 
> The implementation for intel_map_resource() is nearly identical to
> intel_map_page(); we just have to re-create __intel_map_single().
> dma_unmap_resource() uses intel_unmap_page() directly as the
> functions are identical.
> 
> Signed-off-by: Logan Gunthorpe <logang@...tatee.com>
> Cc: David Woodhouse <dwmw2@...radead.org>
> Cc: Joerg Roedel <joro@...tes.org>
> ---
>  drivers/iommu/intel-iommu.c | 23 ++++++++++++++++-------
>  1 file changed, 16 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
> index 78188bf7e90d..ad737e16575b 100644
> --- a/drivers/iommu/intel-iommu.c
> +++ b/drivers/iommu/intel-iommu.c
> @@ -3649,11 +3649,9 @@ static int iommu_no_mapping(struct device *dev)
>  	return 0;
>  }
>  
> -static dma_addr_t __intel_map_page(struct device *dev, struct page *page,
> -				   unsigned long offset, size_t size, int dir,
> -				   u64 dma_mask)
> +static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
> +				     size_t size, int dir, u64 dma_mask)
>  {
> -	phys_addr_t paddr = page_to_phys(page) + offset;
>  	struct dmar_domain *domain;
>  	phys_addr_t start_paddr;
>  	unsigned long iova_pfn;
> @@ -3715,7 +3713,15 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
>  				 enum dma_data_direction dir,
>  				 unsigned long attrs)
>  {
> -	return __intel_map_page(dev, page, offset, size, dir, *dev->dma_mask);
> +	return __intel_map_single(dev, page_to_phys(page) + offset, size,
> +				  dir, *dev->dma_mask);
> +}
> +
> +static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
> +				     size_t size, enum dma_data_direction dir,
> +				     unsigned long attrs)
> +{
> +	return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
>  }
>  
>  static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
> @@ -3806,8 +3812,9 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
>  		return NULL;
>  	memset(page_address(page), 0, size);
>  
> -	*dma_handle = __intel_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL,
> -				       dev->coherent_dma_mask);
> +	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
> +					 DMA_BIDIRECTIONAL,
> +					 dev->coherent_dma_mask);
>  	if (*dma_handle != DMA_MAPPING_ERROR)
>  		return page_address(page);
>  	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
> @@ -3924,6 +3931,8 @@ static const struct dma_map_ops intel_dma_ops = {
>  	.unmap_sg = intel_unmap_sg,
>  	.map_page = intel_map_page,
>  	.unmap_page = intel_unmap_page,
> +	.map_resource = intel_map_resource,
> +	.unmap_resource = intel_unmap_page,
>  	.dma_supported = dma_direct_supported,
>  };
>  
> 
