Message-ID: <9412baed-0d13-dab7-0bdc-90cfdf8e92f0@arm.com>
Date: Mon, 29 Apr 2019 15:45:16 +0100
From: Robin Murphy <robin.murphy@....com>
To: Christoph Hellwig <hch@....de>
Cc: Joerg Roedel <joro@...tes.org>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will.deacon@....com>,
Tom Lendacky <thomas.lendacky@....com>,
iommu@...ts.linux-foundation.org,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 20/26] iommu/dma: Refactor iommu_dma_alloc, part 2
On 22/04/2019 18:59, Christoph Hellwig wrote:
> From: Robin Murphy <robin.murphy@....com>
Honestly, I don't think there's anything left of my patch here...
> Apart from the iommu_dma_alloc_remap() case which remains sufficiently
> different that it's better off being self-contained, the rest of the
> logic can now be consolidated into a single flow which separates the
> logically-distinct steps of allocating pages, getting the CPU address,
> and finally getting the IOMMU address.
...and it certainly doesn't do that any more.
It's clear that we have fundamentally different ways of reading code, so
I don't think it's productive to keep arguing personal preference. I
still find the end result here a fair bit more tolerable than before, so
if you update the commit message to reflect the actual change (at which
point there's really nothing left of my authorship), I can live with it.
Robin.
> Signed-off-by: Robin Murphy <robin.murphy@....com>
> [hch: split the page allocation into a new helper to simplify the flow]
> Signed-off-by: Christoph Hellwig <hch@....de>
> ---
> drivers/iommu/dma-iommu.c | 65 +++++++++++++++++++++------------------
> 1 file changed, 35 insertions(+), 30 deletions(-)
>
> diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
> index 9b269f0792f3..acdfe866cb29 100644
> --- a/drivers/iommu/dma-iommu.c
> +++ b/drivers/iommu/dma-iommu.c
> @@ -955,35 +955,14 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
> __iommu_dma_free(dev, size, cpu_addr);
> }
>
> -static void *iommu_dma_alloc(struct device *dev, size_t size,
> - dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
> +static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
> + struct page **pagep, gfp_t gfp, unsigned long attrs)
> {
> bool coherent = dev_is_dma_coherent(dev);
> - int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
> size_t alloc_size = PAGE_ALIGN(size);
> struct page *page = NULL;
> void *cpu_addr;
>
> - gfp |= __GFP_ZERO;
> -
> - if (gfpflags_allow_blocking(gfp) &&
> - !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
> - return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
> -
> - if (!gfpflags_allow_blocking(gfp) && !coherent) {
> - cpu_addr = dma_alloc_from_pool(alloc_size, &page, gfp);
> - if (!cpu_addr)
> - return NULL;
> -
> - *handle = __iommu_dma_map(dev, page_to_phys(page), size,
> - ioprot);
> - if (*handle == DMA_MAPPING_ERROR) {
> - dma_free_from_pool(cpu_addr, alloc_size);
> - return NULL;
> - }
> - return cpu_addr;
> - }
> -
> if (gfpflags_allow_blocking(gfp))
> page = dma_alloc_from_contiguous(dev, alloc_size >> PAGE_SHIFT,
> get_order(alloc_size),
> @@ -993,33 +972,59 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
> if (!page)
> return NULL;
>
> - *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
> - if (*handle == DMA_MAPPING_ERROR)
> - goto out_free_pages;
> -
> if (!coherent || PageHighMem(page)) {
> pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
>
> cpu_addr = dma_common_contiguous_remap(page, alloc_size,
> VM_USERMAP, prot, __builtin_return_address(0));
> if (!cpu_addr)
> - goto out_unmap;
> + goto out_free_pages;
>
> if (!coherent)
> arch_dma_prep_coherent(page, size);
> } else {
> cpu_addr = page_address(page);
> }
> +
> + *pagep = page;
> memset(cpu_addr, 0, alloc_size);
> return cpu_addr;
> -out_unmap:
> - __iommu_dma_unmap(dev, *handle, size);
> out_free_pages:
> if (!dma_release_from_contiguous(dev, page, alloc_size >> PAGE_SHIFT))
> __free_pages(page, get_order(alloc_size));
> return NULL;
> }
>
> +static void *iommu_dma_alloc(struct device *dev, size_t size,
> + dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
> +{
> + bool coherent = dev_is_dma_coherent(dev);
> + int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
> + struct page *page = NULL;
> + void *cpu_addr;
> +
> + gfp |= __GFP_ZERO;
> +
> + if (gfpflags_allow_blocking(gfp) &&
> + !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
> + return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
> +
> + if (!gfpflags_allow_blocking(gfp) && !coherent)
> + cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
> + else
> + cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
> + if (!cpu_addr)
> + return NULL;
> +
> + *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
> + if (*handle == DMA_MAPPING_ERROR) {
> + __iommu_dma_free(dev, size, cpu_addr);
> + return NULL;
> + }
> +
> + return cpu_addr;
> +}
> +
> static int __iommu_dma_mmap_pfn(struct vm_area_struct *vma,
> unsigned long pfn, size_t size)
> {
>
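For anyone skimming the thread, the consolidated flow under discussion
reduces to roughly the shape below. This is a sketch reassembled from
the hunks above (not copied from the tree), with comments added to map
the code onto the three logically-distinct steps the commit message
names; treat it as illustrative rather than as the exact kernel source:

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	/* Remapped, non-contiguous allocations remain self-contained. */
	if (gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);

	/*
	 * Steps 1 and 2: allocate pages and obtain a CPU address, either
	 * from the atomic pool (non-blocking, non-coherent case) or via
	 * the new iommu_dma_alloc_pages() helper (CMA or the page
	 * allocator).
	 */
	if (!gfpflags_allow_blocking(gfp) && !coherent)
		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	/* Step 3: obtain the IOMMU (DMA) address last. */
	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
	if (*handle == DMA_MAPPING_ERROR) {
		/* Mapping last lets one unwind path cover both sources. */
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}
	return cpu_addr;
}

Doing the IOMMU mapping as the final step is what allows the error path
to collapse into a single __iommu_dma_free() call, regardless of where
the pages and CPU address came from.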