Message-ID: <afaf49d9-5465-4b1a-dac1-91688ba4abbf@ozlabs.ru>
Date: Mon, 19 Oct 2020 13:25:02 +1100
From: Alexey Kardashevskiy <aik@...abs.ru>
To: Christoph Hellwig <hch@....de>, iommu@...ts.linux-foundation.org
Cc: linux-arch@...r.kernel.org, Sekhar Nori <nsekhar@...com>,
Russell King <linux@...linux.org.uk>,
linux-kernel@...r.kernel.org, Robin Murphy <robin.murphy@....com>,
linux-arm-kernel@...ts.infradead.org,
Michael Ellerman <mpe@...erman.id.au>
Subject: Re: [PATCH 8/9] dma-mapping: move large parts of <linux/dma-direct.h>
to kernel/dma
On 30/09/2020 18:55, Christoph Hellwig wrote:
> Most of the dma_direct symbols should only be used by direct.c and
> mapping.c, so move them to kernel/dma. In fact more of dma-direct.h
> should eventually move, but that will require more coordination with
> other subsystems.
Because of this change,
http://patchwork.ozlabs.org/project/linuxppc-dev/patch/20200713062348.100552-1-aik@ozlabs.ru/
no longer works.
Should I send a patch moving dma_direct_map_sg/dma_direct_map_page (and
their unmap counterparts) back to include/, or is there a better idea?
For context, the caller in question is sketched below. Thanks,
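
This is roughly the shape of it (a sketch from memory along the lines
of arch/powerpc/kernel/dma-iommu.c, not a verbatim quote from that
patch; the iommu fallback path is elided):

#include <linux/dma-direct.h>	/* used to declare dma_direct_map_page() */

static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	/* fast path: the device can address all of memory directly */
	if (dma_iommu_map_bypass(dev, attrs))
		return dma_direct_map_page(dev, page, offset, size, dir,
					   attrs);
	/* ... otherwise map through the iommu table as before ... */
}

With dma_direct_map_page() now declared only in kernel/dma/direct.h,
arch code like this has no header it can include to get the prototype.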
>
> Signed-off-by: Christoph Hellwig <hch@....de>
> ---
> include/linux/dma-direct.h | 106 ---------------------------------
> kernel/dma/direct.c | 2 +-
> kernel/dma/direct.h | 119 +++++++++++++++++++++++++++++++++++++
> kernel/dma/mapping.c | 2 +-
> 4 files changed, 121 insertions(+), 108 deletions(-)
> create mode 100644 kernel/dma/direct.h
>
> diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
> index 38ed3b55034d50..a2d6640c42c04e 100644
> --- a/include/linux/dma-direct.h
> +++ b/include/linux/dma-direct.h
> @@ -120,114 +120,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
> void dma_direct_free_pages(struct device *dev, size_t size,
> struct page *page, dma_addr_t dma_addr,
> enum dma_data_direction dir);
> -int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
> - void *cpu_addr, dma_addr_t dma_addr, size_t size,
> - unsigned long attrs);
> -bool dma_direct_can_mmap(struct device *dev);
> -int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
> - void *cpu_addr, dma_addr_t dma_addr, size_t size,
> - unsigned long attrs);
> int dma_direct_supported(struct device *dev, u64 mask);
> -bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
> -int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
> - enum dma_data_direction dir, unsigned long attrs);
> dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
> size_t size, enum dma_data_direction dir, unsigned long attrs);
> -size_t dma_direct_max_mapping_size(struct device *dev);
>
> -#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
> - defined(CONFIG_SWIOTLB)
> -void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
> - int nents, enum dma_data_direction dir);
> -#else
> -static inline void dma_direct_sync_sg_for_device(struct device *dev,
> - struct scatterlist *sgl, int nents, enum dma_data_direction dir)
> -{
> -}
> -#endif
> -
> -#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
> - defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
> - defined(CONFIG_SWIOTLB)
> -void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
> - int nents, enum dma_data_direction dir, unsigned long attrs);
> -void dma_direct_sync_sg_for_cpu(struct device *dev,
> - struct scatterlist *sgl, int nents, enum dma_data_direction dir);
> -#else
> -static inline void dma_direct_unmap_sg(struct device *dev,
> - struct scatterlist *sgl, int nents, enum dma_data_direction dir,
> - unsigned long attrs)
> -{
> -}
> -static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
> - struct scatterlist *sgl, int nents, enum dma_data_direction dir)
> -{
> -}
> -#endif
> -
> -static inline void dma_direct_sync_single_for_device(struct device *dev,
> - dma_addr_t addr, size_t size, enum dma_data_direction dir)
> -{
> - phys_addr_t paddr = dma_to_phys(dev, addr);
> -
> - if (unlikely(is_swiotlb_buffer(paddr)))
> - swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
> -
> - if (!dev_is_dma_coherent(dev))
> - arch_sync_dma_for_device(paddr, size, dir);
> -}
> -
> -static inline void dma_direct_sync_single_for_cpu(struct device *dev,
> - dma_addr_t addr, size_t size, enum dma_data_direction dir)
> -{
> - phys_addr_t paddr = dma_to_phys(dev, addr);
> -
> - if (!dev_is_dma_coherent(dev)) {
> - arch_sync_dma_for_cpu(paddr, size, dir);
> - arch_sync_dma_for_cpu_all();
> - }
> -
> - if (unlikely(is_swiotlb_buffer(paddr)))
> - swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
> -
> - if (dir == DMA_FROM_DEVICE)
> - arch_dma_mark_clean(paddr, size);
> -}
> -
> -static inline dma_addr_t dma_direct_map_page(struct device *dev,
> - struct page *page, unsigned long offset, size_t size,
> - enum dma_data_direction dir, unsigned long attrs)
> -{
> - phys_addr_t phys = page_to_phys(page) + offset;
> - dma_addr_t dma_addr = phys_to_dma(dev, phys);
> -
> - if (unlikely(swiotlb_force == SWIOTLB_FORCE))
> - return swiotlb_map(dev, phys, size, dir, attrs);
> -
> - if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
> - if (swiotlb_force != SWIOTLB_NO_FORCE)
> - return swiotlb_map(dev, phys, size, dir, attrs);
> -
> - dev_WARN_ONCE(dev, 1,
> - "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
> - &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
> - return DMA_MAPPING_ERROR;
> - }
> -
> - if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> - arch_sync_dma_for_device(phys, size, dir);
> - return dma_addr;
> -}
> -
> -static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
> - size_t size, enum dma_data_direction dir, unsigned long attrs)
> -{
> - phys_addr_t phys = dma_to_phys(dev, addr);
> -
> - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> - dma_direct_sync_single_for_cpu(dev, addr, size, dir);
> -
> - if (unlikely(is_swiotlb_buffer(phys)))
> - swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
> -}
> #endif /* _LINUX_DMA_DIRECT_H */
> diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
> index 87697c86f0b82a..bf9f77623022bb 100644
> --- a/kernel/dma/direct.c
> +++ b/kernel/dma/direct.c
> @@ -7,13 +7,13 @@
> #include <linux/memblock.h> /* for max_pfn */
> #include <linux/export.h>
> #include <linux/mm.h>
> -#include <linux/dma-direct.h>
> #include <linux/dma-map-ops.h>
> #include <linux/scatterlist.h>
> #include <linux/pfn.h>
> #include <linux/vmalloc.h>
> #include <linux/set_memory.h>
> #include <linux/slab.h>
> +#include "direct.h"
>
> /*
> * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use it
> diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
> new file mode 100644
> index 00000000000000..b9861557873768
> --- /dev/null
> +++ b/kernel/dma/direct.h
> @@ -0,0 +1,119 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Copyright (C) 2018 Christoph Hellwig.
> + *
> + * DMA operations that map physical memory directly without using an IOMMU.
> + */
> +#ifndef _KERNEL_DMA_DIRECT_H
> +#define _KERNEL_DMA_DIRECT_H
> +
> +#include <linux/dma-direct.h>
> +
> +int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
> + void *cpu_addr, dma_addr_t dma_addr, size_t size,
> + unsigned long attrs);
> +bool dma_direct_can_mmap(struct device *dev);
> +int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
> + void *cpu_addr, dma_addr_t dma_addr, size_t size,
> + unsigned long attrs);
> +bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
> +int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
> + enum dma_data_direction dir, unsigned long attrs);
> +size_t dma_direct_max_mapping_size(struct device *dev);
> +
> +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
> + defined(CONFIG_SWIOTLB)
> +void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
> + int nents, enum dma_data_direction dir);
> +#else
> +static inline void dma_direct_sync_sg_for_device(struct device *dev,
> + struct scatterlist *sgl, int nents, enum dma_data_direction dir)
> +{
> +}
> +#endif
> +
> +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
> + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
> + defined(CONFIG_SWIOTLB)
> +void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
> + int nents, enum dma_data_direction dir, unsigned long attrs);
> +void dma_direct_sync_sg_for_cpu(struct device *dev,
> + struct scatterlist *sgl, int nents, enum dma_data_direction dir);
> +#else
> +static inline void dma_direct_unmap_sg(struct device *dev,
> + struct scatterlist *sgl, int nents, enum dma_data_direction dir,
> + unsigned long attrs)
> +{
> +}
> +static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
> + struct scatterlist *sgl, int nents, enum dma_data_direction dir)
> +{
> +}
> +#endif
> +
> +static inline void dma_direct_sync_single_for_device(struct device *dev,
> + dma_addr_t addr, size_t size, enum dma_data_direction dir)
> +{
> + phys_addr_t paddr = dma_to_phys(dev, addr);
> +
> + if (unlikely(is_swiotlb_buffer(paddr)))
> + swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
> +
> + if (!dev_is_dma_coherent(dev))
> + arch_sync_dma_for_device(paddr, size, dir);
> +}
> +
> +static inline void dma_direct_sync_single_for_cpu(struct device *dev,
> + dma_addr_t addr, size_t size, enum dma_data_direction dir)
> +{
> + phys_addr_t paddr = dma_to_phys(dev, addr);
> +
> + if (!dev_is_dma_coherent(dev)) {
> + arch_sync_dma_for_cpu(paddr, size, dir);
> + arch_sync_dma_for_cpu_all();
> + }
> +
> + if (unlikely(is_swiotlb_buffer(paddr)))
> + swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
> +
> + if (dir == DMA_FROM_DEVICE)
> + arch_dma_mark_clean(paddr, size);
> +}
> +
> +static inline dma_addr_t dma_direct_map_page(struct device *dev,
> + struct page *page, unsigned long offset, size_t size,
> + enum dma_data_direction dir, unsigned long attrs)
> +{
> + phys_addr_t phys = page_to_phys(page) + offset;
> + dma_addr_t dma_addr = phys_to_dma(dev, phys);
> +
> + if (unlikely(swiotlb_force == SWIOTLB_FORCE))
> + return swiotlb_map(dev, phys, size, dir, attrs);
> +
> + if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
> + if (swiotlb_force != SWIOTLB_NO_FORCE)
> + return swiotlb_map(dev, phys, size, dir, attrs);
> +
> + dev_WARN_ONCE(dev, 1,
> + "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
> + &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
> + return DMA_MAPPING_ERROR;
> + }
> +
> + if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> + arch_sync_dma_for_device(phys, size, dir);
> + return dma_addr;
> +}
> +
> +static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
> + size_t size, enum dma_data_direction dir, unsigned long attrs)
> +{
> + phys_addr_t phys = dma_to_phys(dev, addr);
> +
> + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> + dma_direct_sync_single_for_cpu(dev, addr, size, dir);
> +
> + if (unlikely(is_swiotlb_buffer(phys)))
> + swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
> +}
> +#endif /* _KERNEL_DMA_DIRECT_H */
> diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
> index 335ba183e0956a..51bb8fa8eb8948 100644
> --- a/kernel/dma/mapping.c
> +++ b/kernel/dma/mapping.c
> @@ -7,7 +7,6 @@
> */
> #include <linux/memblock.h> /* for max_pfn */
> #include <linux/acpi.h>
> -#include <linux/dma-direct.h>
> #include <linux/dma-map-ops.h>
> #include <linux/export.h>
> #include <linux/gfp.h>
> @@ -15,6 +14,7 @@
> #include <linux/slab.h>
> #include <linux/vmalloc.h>
> #include "debug.h"
> +#include "direct.h"
>
> /*
> * Managed DMA API
>
--
Alexey