Message-Id: <20190421011719.14909-3-baolu.lu@linux.intel.com>
Date: Sun, 21 Apr 2019 09:17:11 +0800
From: Lu Baolu <baolu.lu@...ux.intel.com>
To: David Woodhouse <dwmw2@...radead.org>,
Joerg Roedel <joro@...tes.org>
Cc: ashok.raj@...el.com, jacob.jun.pan@...el.com, alan.cox@...el.com,
kevin.tian@...el.com, mika.westerberg@...ux.intel.com,
pengfei.xu@...el.com,
Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>,
Christoph Hellwig <hch@....de>,
Marek Szyprowski <m.szyprowski@...sung.com>,
Robin Murphy <robin.murphy@....com>,
iommu@...ts.linux-foundation.org, linux-kernel@...r.kernel.org,
Lu Baolu <baolu.lu@...ux.intel.com>
Subject: [PATCH v3 02/10] swiotlb: Factor out slot allocation and free
Move the slot allocation and freeing code into two common helper
functions, swiotlb_tbl_alloc_tlb() and swiotlb_tbl_free_tlb(), in
order to avoid code duplication. There is no functional change.
Cc: Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>
Cc: Christoph Hellwig <hch@....de>
Cc: Marek Szyprowski <m.szyprowski@...sung.com>
Cc: Robin Murphy <robin.murphy@....com>
Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
---
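A rough, standalone sketch of the shape this refactoring produces, for
anyone skimming the diff below. All names in it (alloc_tlb, free_tlb,
map_single, unmap_single, MAPPING_ERROR) are illustrative stand-ins
rather than the kernel symbols, and the slot search, locking, and
bounce-buffer copies are stubbed out:

/*
 * Illustrative sketch only, not the kernel code: two static helpers
 * that own nothing but slot bookkeeping, plus two exported wrappers
 * that keep the warning and CPU-sync policy.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
#define MAPPING_ERROR ((phys_addr_t)~0)

/* Helper: find and reserve contiguous slots (search elided). */
static phys_addr_t alloc_tlb(phys_addr_t orig, size_t size)
{
	(void)orig; (void)size;
	return MAPPING_ERROR;	/* stub: pretend the pool is full */
}

/* Helper: return the slots to the free list (merge elided). */
static void free_tlb(phys_addr_t tlb, size_t size)
{
	(void)tlb; (void)size;
}

/* Wrapper: allocate, then apply the hoisted-out policy, i.e. warn
 * on failure and bounce CPU to device on success. */
phys_addr_t map_single(phys_addr_t orig, size_t size)
{
	phys_addr_t tlb = alloc_tlb(orig, size);

	if (tlb == MAPPING_ERROR)
		fprintf(stderr, "bounce pool full (sz: %zu bytes)\n", size);
	/* else: bounce(orig -> tlb) for the TO_DEVICE direction */
	return tlb;
}

/* Wrapper: sync device to CPU first, then free the slots. */
void unmap_single(phys_addr_t tlb, size_t size)
{
	/* bounce(tlb -> orig) for the FROM_DEVICE direction */
	free_tlb(tlb, size);
}

int main(void)
{
	phys_addr_t tlb = map_single(0x1000, 256);

	if (tlb != MAPPING_ERROR)
		unmap_single(tlb, 256);
	return 0;
}

The point of the split is that the static helpers own only the slot
bookkeeping, while the exported entry points keep the policy that used
to be inlined in them: the DMA_ATTR_NO_WARN-gated warning and the
direction checks around swiotlb_bounce().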
kernel/dma/swiotlb.c | 72 +++++++++++++++++++++++++++++---------------
1 file changed, 47 insertions(+), 25 deletions(-)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 53012db1e53c..173122d16b7f 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -439,11 +439,9 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
}
}
-phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
- dma_addr_t tbl_dma_addr,
- phys_addr_t orig_addr, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
+static phys_addr_t
+swiotlb_tbl_alloc_tlb(struct device *hwdev, dma_addr_t tbl_dma_addr,
+ phys_addr_t orig_addr, size_t size)
{
unsigned long flags;
phys_addr_t tlb_addr;
@@ -539,8 +537,6 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
not_found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
- if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
- dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
return DMA_MAPPING_ERROR;
found:
io_tlb_used += nslots;
@@ -553,32 +549,16 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
*/
for (i = 0; i < nslots; i++)
io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
- swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
return tlb_addr;
}
-/*
- * tlb_addr is the physical address of the bounce buffer to unmap.
- */
-void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
+static void
+swiotlb_tbl_free_tlb(struct device *hwdev, phys_addr_t tlb_addr, size_t size)
{
unsigned long flags;
int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
- phys_addr_t orig_addr = io_tlb_orig_addr[index];
-
- /*
- * First, sync the memory before unmapping the entry
- */
- if (orig_addr != INVALID_PHYS_ADDR &&
- !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
- swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
/*
* Return the buffer to the free list by setting the corresponding
@@ -610,6 +590,48 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
spin_unlock_irqrestore(&io_tlb_lock, flags);
}
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
+ dma_addr_t tbl_dma_addr,
+ phys_addr_t orig_addr, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ phys_addr_t tlb_addr;
+
+ tlb_addr = swiotlb_tbl_alloc_tlb(hwdev, tbl_dma_addr, orig_addr, size);
+ if (tlb_addr == DMA_MAPPING_ERROR) {
+ if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
+ dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n",
+ size);
+ } else if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
+ swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
+ }
+
+ return tlb_addr;
+}
+
+/*
+ * tlb_addr is the physical address of the bounce buffer to unmap.
+ */
+void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+ phys_addr_t orig_addr = io_tlb_orig_addr[index];
+
+ /*
+ * First, sync the memory before unmapping the entry
+ */
+ if (orig_addr != INVALID_PHYS_ADDR &&
+ !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+ swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
+
+ swiotlb_tbl_free_tlb(hwdev, tlb_addr, size);
+}
+
void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir,
enum dma_sync_target target)
--
2.17.1