Message-ID: <20250917103644.GB6464@unreal>
Date: Wed, 17 Sep 2025 13:36:44 +0300
From: Leon Romanovsky <leon@...nel.org>
To: Jason Gunthorpe <jgg@...dia.com>
Cc: Marek Szyprowski <m.szyprowski@...sung.com>, iommu@...ts.linux.dev,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
Russell King <linux@...linux.org.uk>
Subject: Re: [PATCH v3 3/4] ARM: dma-mapping: Switch to physical address mapping callbacks

On Tue, Sep 16, 2025 at 03:46:17PM -0300, Jason Gunthorpe wrote:
> On Tue, Sep 16, 2025 at 10:32:06AM +0300, Leon Romanovsky wrote:
> > + if (!dev->dma_coherent &&
> > + !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
> > + __dma_page_cpu_to_dev(phys_to_page(phys), offset, size, dir);
>
> I'd keep going and get rid of the page here too, maybe as a second
> patch in this series:
Thanks, it is always unclear how far to go with cleanups.
>
> diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
> index 88c2d68a69c9ee..a84d12cd0ba4a9 100644
> --- a/arch/arm/mm/dma-mapping.c
> +++ b/arch/arm/mm/dma-mapping.c
> @@ -624,16 +624,14 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
> kfree(buf);
> }
>
> -static void dma_cache_maint_page(struct page *page, unsigned long offset,
> +static void dma_cache_maint_page(phys_addr_t paddr,
> size_t size, enum dma_data_direction dir,
> void (*op)(const void *, size_t, int))
> {
> - unsigned long pfn;
> +	unsigned long pfn = __phys_to_pfn(paddr);
> +	unsigned int offset = offset_in_page(paddr);
> size_t left = size;
>
> - pfn = page_to_pfn(page) + offset / PAGE_SIZE;
> - offset %= PAGE_SIZE;
> -
> /*
> * A single sg entry may refer to multiple physically contiguous
> * pages. But we still need to process highmem pages individually.
> @@ -644,17 +642,17 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
> size_t len = left;
> void *vaddr;
>
> - page = pfn_to_page(pfn);
> -
> - if (PageHighMem(page)) {
> +		if (PhysHighMem(__pfn_to_phys(pfn))) {
> if (len + offset > PAGE_SIZE)
> len = PAGE_SIZE - offset;
>
> if (cache_is_vipt_nonaliasing()) {
> - vaddr = kmap_atomic(page);
> + vaddr = kmap_atomic_pfn(pfn);
> op(vaddr + offset, len, dir);
> kunmap_atomic(vaddr);
> } else {
> + struct page *page = pfn_to_page(pfn);
> +
> vaddr = kmap_high_get(page);
> if (vaddr) {
> op(vaddr + offset, len, dir);
> @@ -662,7 +660,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
> }
> }
> } else {
> - vaddr = page_address(page) + offset;
> +			vaddr = phys_to_virt(__pfn_to_phys(pfn)) + offset;
> op(vaddr, len, dir);
> }
> offset = 0;
> @@ -676,14 +674,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
> * Note: Drivers should NOT use this function directly.
> * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
> */
> -static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
> +static void __dma_page_cpu_to_dev(phys_addr_t paddr,
> size_t size, enum dma_data_direction dir)
> {
> - phys_addr_t paddr;
> + dma_cache_maint_page(paddr, size, dir, dmac_map_area);
>
> - dma_cache_maint_page(page, off, size, dir, dmac_map_area);
> -
> - paddr = page_to_phys(page) + off;
> if (dir == DMA_FROM_DEVICE) {
> outer_inv_range(paddr, paddr + size);
> } else {
>
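With that, the call site from this patch collapses nicely. A minimal
sketch (untested, and assuming offset in arm_dma_map_phys() is just
offset_in_page(phys), so it is already carried by phys):

	/* offset no longer passed separately: paddr carries it */
	if (!dev->dma_coherent &&
	    !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
		__dma_page_cpu_to_dev(phys, size, dir);
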
> > + if (!dev->dma_coherent &&
> > + !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
> > page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
> > __dma_page_dev_to_cpu(page, offset, size, dir);
>
> Same treatment here..
>
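Something like this, I assume (untested sketch; it relies on iova being
page-aligned here, as in the current code, so the offset can simply be
folded into the physical address returned by iommu_iova_to_phys()):

	if (!dev->dma_coherent &&
	    !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
		/* no phys_to_page() round trip, pass phys + offset directly */
		__dma_page_dev_to_cpu(iommu_iova_to_phys(mapping->domain,
							 iova) + offset,
				      size, dir);
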
> Looks Ok though, I didn't notice any pitfalls
>
> Reviewed-by: Jason Gunthorpe <jgg@...dia.com>
>
> Jason
>