Message-ID: <b67950f5-dc3d-af5c-f4ff-d8943e5a5532@arm.com>
Date: Tue, 9 Apr 2019 16:54:31 +0100
From: Robin Murphy <robin.murphy@....com>
To: Christoph Hellwig <hch@....de>
Cc: Joerg Roedel <joro@...tes.org>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will.deacon@....com>,
Tom Lendacky <thomas.lendacky@....com>,
iommu@...ts.linux-foundation.org,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 10/21] dma-iommu: move __iommu_dma_map

On 27/03/2019 08:04, Christoph Hellwig wrote:
> Moving this function up to its unmap counterpart helps to keep related
> code together for the following changes.

Reviewed-by: Robin Murphy <robin.murphy@....com>

> Signed-off-by: Christoph Hellwig <hch@....de>
> ---
> drivers/iommu/dma-iommu.c | 46 +++++++++++++++++++--------------------
> 1 file changed, 23 insertions(+), 23 deletions(-)
>
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index 57f2d8621112..4d46beeea5b7 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -435,6 +435,29 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
>  	iommu_dma_free_iova(cookie, dma_addr, size);
>  }
>
> +static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
> +		size_t size, int prot, struct iommu_domain *domain)
> +{
> +	struct iommu_dma_cookie *cookie = domain->iova_cookie;
> +	size_t iova_off = 0;
> +	dma_addr_t iova;
> +
> +	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
> +		iova_off = iova_offset(&cookie->iovad, phys);
> +		size = iova_align(&cookie->iovad, size + iova_off);
> +	}
> +
> +	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
> +	if (!iova)
> +		return DMA_MAPPING_ERROR;
> +
> +	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
> +		iommu_dma_free_iova(cookie, iova, size);
> +		return DMA_MAPPING_ERROR;
> +	}
> +	return iova + iova_off;
> +}
> +
>  static void __iommu_dma_free_pages(struct page **pages, int count)
>  {
>  	while (count--)
> @@ -689,29 +712,6 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
>  		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
>  }
>
> -static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
> -		size_t size, int prot, struct iommu_domain *domain)
> -{
> -	struct iommu_dma_cookie *cookie = domain->iova_cookie;
> -	size_t iova_off = 0;
> -	dma_addr_t iova;
> -
> -	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
> -		iova_off = iova_offset(&cookie->iovad, phys);
> -		size = iova_align(&cookie->iovad, size + iova_off);
> -	}
> -
> -	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
> -	if (!iova)
> -		return DMA_MAPPING_ERROR;
> -
> -	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
> -		iommu_dma_free_iova(cookie, iova, size);
> -		return DMA_MAPPING_ERROR;
> -	}
> -	return iova + iova_off;
> -}
> -
>  static dma_addr_t __iommu_dma_map_page(struct device *dev, struct page *page,
>  		unsigned long offset, size_t size, int prot)
>  {
>
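
(For context, not part of the patch: since __iommu_dma_map() still takes the
domain and prot explicitly, a page-mapping wrapper on top of it stays a
one-liner. A rough sketch below, assuming the usual helpers -
iommu_get_domain_for_dev(), dma_info_to_prot() and dev_is_dma_coherent() -
are available; the actual in-tree caller may pick its domain and prot
differently.)

/*
 * Hypothetical sketch only - shows how a wrapper might call
 * __iommu_dma_map(). The helper choices here are assumptions for
 * illustration, not the patch's actual callers.
 */
static dma_addr_t example_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);

	return __iommu_dma_map(dev, phys, size, prot,
			iommu_get_domain_for_dev(dev));
}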