Message-Id: <20181008080246.20543-8-hch@lst.de>
Date: Mon, 8 Oct 2018 10:02:43 +0200
From: Christoph Hellwig <hch@....de>
To: Will Deacon <will.deacon@....com>,
Catalin Marinas <catalin.marinas@....com>,
Robin Murphy <robin.murphy@....com>,
Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>
Cc: linux-arm-kernel@...ts.infradead.org,
iommu@...ts.linux-foundation.org, linux-kernel@...r.kernel.org
Subject: [PATCH 07/10] swiotlb: refactor swiotlb_map_page
Remove the map_single function, which was little more than a thin wrapper
around swiotlb_tbl_map_single, and replace it with a swiotlb_bounce_page
helper that handles everything related to actually bouncing a page: the
SWIOTLB_NO_FORCE check, allocating and mapping the bounce buffer, and
verifying that the resulting address is DMA capable.
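
With that, swiotlb_map_page reduces to a single fast-path check plus one
call into the helper.  For reference, the function after this patch reads
roughly as follows; this is a sketch assembled from the hunks below, where
the offset/size parameters and the first line of the comment block fall
outside the diff context and are filled in from the unmodified function:

dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir,
			    unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);

	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (!dma_capable(dev, dma_addr, size) ||
	    swiotlb_force == SWIOTLB_FORCE) {
		trace_swiotlb_bounced(dev, dma_addr, size, swiotlb_force);
		dma_addr = swiotlb_bounce_page(dev, &phys, size, dir, attrs);
	}

	return dma_addr;
}

Note that on any failure swiotlb_bounce_page unwinds its own mapping
(with DMA_ATTR_SKIP_CPU_SYNC, since the data was never synced) and
returns DIRECT_MAPPING_ERROR itself, so the caller no longer needs a
separate error path.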
Signed-off-by: Christoph Hellwig <hch@....de>
---
kernel/dma/swiotlb.c | 77 +++++++++++++++++++++-----------------------
1 file changed, 36 insertions(+), 41 deletions(-)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 15755d7a5242..4d7a4d85d71e 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -543,26 +543,6 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
return tlb_addr;
}
-/*
- * Allocates bounce buffer and returns its physical address.
- */
-static phys_addr_t
-map_single(struct device *hwdev, phys_addr_t phys, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- dma_addr_t start_dma_addr;
-
- if (swiotlb_force == SWIOTLB_NO_FORCE) {
- dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
- &phys);
- return SWIOTLB_MAP_ERROR;
- }
-
- start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
- return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
- dir, attrs);
-}
-
/*
* tlb_addr is the physical address of the bounce buffer to unmap.
*/
@@ -714,6 +694,34 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size,
return true;
}
+static dma_addr_t swiotlb_bounce_page(struct device *dev, phys_addr_t *phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ dma_addr_t dma_addr;
+
+ if (unlikely(swiotlb_force == SWIOTLB_NO_FORCE)) {
+ dev_warn_ratelimited(dev,
+ "Cannot do DMA to address %pa\n", phys);
+ return DIRECT_MAPPING_ERROR;
+ }
+
+ /* Oh well, have to allocate and map a bounce buffer. */
+ *phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
+ *phys, size, dir, attrs);
+ if (*phys == SWIOTLB_MAP_ERROR)
+ return DIRECT_MAPPING_ERROR;
+
+ /* Ensure that the address returned is DMA'ble */
+ dma_addr = __phys_to_dma(dev, *phys);
+ if (unlikely(!dma_capable(dev, dma_addr, size))) {
+ swiotlb_tbl_unmap_single(dev, *phys, size, dir,
+ attrs | DMA_ATTR_SKIP_CPU_SYNC);
+ return DIRECT_MAPPING_ERROR;
+ }
+
+ return dma_addr;
+}
+
/*
* Map a single buffer of the indicated size for DMA in streaming mode. The
* physical address to use is returned.
@@ -726,8 +734,8 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- phys_addr_t map, phys = page_to_phys(page) + offset;
- dma_addr_t dev_addr = phys_to_dma(dev, phys);
+ phys_addr_t phys = page_to_phys(page) + offset;
+ dma_addr_t dma_addr = phys_to_dma(dev, phys);
BUG_ON(dir == DMA_NONE);
/*
@@ -735,26 +743,13 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
- return dev_addr;
-
- trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
-
- /* Oh well, have to allocate and map a bounce buffer. */
- map = map_single(dev, phys, size, dir, attrs);
- if (map == SWIOTLB_MAP_ERROR)
- return DIRECT_MAPPING_ERROR;
-
- dev_addr = __phys_to_dma(dev, map);
-
- /* Ensure that the address returned is DMA'ble */
- if (dma_capable(dev, dev_addr, size))
- return dev_addr;
-
- attrs |= DMA_ATTR_SKIP_CPU_SYNC;
- swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+ if (!dma_capable(dev, dma_addr, size) ||
+ swiotlb_force == SWIOTLB_FORCE) {
+ trace_swiotlb_bounced(dev, dma_addr, size, swiotlb_force);
+ dma_addr = swiotlb_bounce_page(dev, &phys, size, dir, attrs);
+ }
- return DIRECT_MAPPING_ERROR;
+ return dma_addr;
}
/*
--
2.19.0