Message-ID: <449e0631-aa32-d4a8-9fa9-07234ae1e533@arm.com>
Date: Fri, 30 Nov 2018 19:05:33 +0000
From: Robin Murphy <robin.murphy@....com>
To: Christoph Hellwig <hch@....de>, iommu@...ts.linux-foundation.org
Cc: Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will.deacon@....com>, Guo Ren <ren_guo@...ky.com>,
Laura Abbott <labbott@...hat.com>,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 5/9] dma-mapping: support highmem in the generic remap
allocator
On 05/11/2018 12:19, Christoph Hellwig wrote:
> By using __dma_direct_alloc_pages we can deal entirely with struct page
> instead of having to derive a kernel virtual address.
Simple enough :)
Reviewed-by: Robin Murphy <robin.murphy@....com>
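
For anyone following the thread who is less familiar with the highmem angle: the point of working purely with struct page is that a highmem page has no address in the kernel linear map, so there is no kaddr to derive in the first place; the only kernel-visible mapping is the vmap() alias this allocator creates itself. The userspace-style sketch below is purely illustrative and not kernel code; every identifier in it (model_page, model_alloc, model_map_alias, model_free) is made up for the example.

/*
 * Purely illustrative model (not kernel code) of why the remap
 * allocator wants a struct page rather than a kernel virtual address:
 * a "highmem" page has no linear-map address, so the caller must map
 * it explicitly (vmap() in the real code) before it can be touched.
 * All identifiers here are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct model_page {             /* stand-in for struct page */
        void *backing;          /* the actual memory */
        int highmem;            /* 1: no permanent kernel mapping */
};

/* Allocate a page descriptor; note it does NOT hand back a mapping. */
static struct model_page *model_alloc(int highmem)
{
        struct model_page *p = calloc(1, sizeof(*p));

        if (!p)
                return NULL;
        p->backing = malloc(4096);
        if (!p->backing) {
                free(p);
                return NULL;
        }
        p->highmem = highmem;
        return p;
}

/* Create a usable alias from the page, the way vmap() does in the kernel. */
static void *model_map_alias(struct model_page *p)
{
        /* works whether or not the page is "highmem" */
        return p->backing;
}

static void model_free(struct model_page *p)
{
        free(p->backing);
        free(p);
}

int main(void)
{
        struct model_page *page = model_alloc(1);       /* a "highmem" page */
        void *vaddr;

        if (!page)
                return 1;
        /*
         * There is no pre-existing kernel address to pass around here;
         * the mapping is created on demand from the page descriptor.
         */
        vaddr = model_map_alias(page);
        memset(vaddr, 0, 4096);
        printf("mapped alias at %p\n", vaddr);
        model_free(page);
        return 0;
}

In the patch below the same shape falls out naturally: __dma_direct_alloc_pages() hands back the page, arch_dma_prep_coherent() and the remap operate on it directly, and the free path goes phys -> pfn -> page rather than via phys_to_virt(), which would be bogus for a highmem page.
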
> Signed-off-by: Christoph Hellwig <hch@....de>
> ---
> kernel/dma/remap.c | 14 +++++++-------
> 1 file changed, 7 insertions(+), 7 deletions(-)
>
> diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
> index bc42766f52df..8f1fca34b894 100644
> --- a/kernel/dma/remap.c
> +++ b/kernel/dma/remap.c
> @@ -196,7 +196,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
> gfp_t flags, unsigned long attrs)
> {
> struct page *page = NULL;
> - void *ret, *kaddr;
> + void *ret;
>
> size = PAGE_ALIGN(size);
>
> @@ -208,10 +208,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
> return ret;
> }
>
> - kaddr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
> - if (!kaddr)
> + page = __dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
> + if (!page)
> return NULL;
> - page = virt_to_page(kaddr);
>
> /* remove any dirty cache lines on the kernel alias */
> arch_dma_prep_coherent(page, size);
> @@ -221,7 +220,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
> arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs),
> __builtin_return_address(0));
> if (!ret)
> - dma_direct_free_pages(dev, size, kaddr, *dma_handle, attrs);
> + __dma_direct_free_pages(dev, size, page);
> return ret;
> }
>
> @@ -229,10 +228,11 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
> dma_addr_t dma_handle, unsigned long attrs)
> {
> if (!dma_free_from_pool(vaddr, PAGE_ALIGN(size))) {
> - void *kaddr = phys_to_virt(dma_to_phys(dev, dma_handle));
> + phys_addr_t phys = dma_to_phys(dev, dma_handle);
> + struct page *page = pfn_to_page(__phys_to_pfn(phys));
>
> vunmap(vaddr);
> - dma_direct_free_pages(dev, size, kaddr, dma_handle, attrs);
> + __dma_direct_free_pages(dev, size, page);
> }
> }
>
>