Message-ID: <20201019113100.23661-4-chao.hao@mediatek.com>
Date: Mon, 19 Oct 2020 19:30:59 +0800
From: Chao Hao <chao.hao@...iatek.com>
To: Joerg Roedel <joro@...tes.org>,
Matthias Brugger <matthias.bgg@...il.com>
CC: <iommu@...ts.linux-foundation.org>, <linux-kernel@...r.kernel.org>,
<linux-arm-kernel@...ts.infradead.org>,
<linux-mediatek@...ts.infradead.org>, <wsd_upstream@...iatek.com>,
Yong Wu <yong.wu@...iatek.com>, FY Yang <fy.yang@...iatek.com>,
Jun Wen <jun.wen@...iatek.com>,
Mingyuan Ma <mingyuan.ma@...iatek.com>,
Chao Hao <chao.hao@...iatek.com>
Subject: [PATCH 3/4] iommu/mediatek: Remove unnecessary tlb sync
As is "[PATCH 2/4]" described, we will use iotlb_sync_range() to replace
iotlb_sync(), tlb_add_range() and tlb_flush_walk/leaf() to enhance
performance. So we will remove the implementation of iotlb_sync(),
tlb_add_range() and tlb_flush_walk/leaf().
Signed-off-by: Chao Hao <chao.hao@...iatek.com>
---
drivers/iommu/mtk_iommu.c | 28 ++++------------------------
1 file changed, 4 insertions(+), 24 deletions(-)
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index d3400c15ff7b..bca1f53c0ab9 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -229,21 +229,15 @@ static void __mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size)
 	mtk_iommu_tlb_flush_range_sync(iova, size, 0, NULL);
 }
 
-static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
-					    unsigned long iova, size_t granule,
-					    void *cookie)
+static void mtk_iommu_tlb_flush_skip(unsigned long iova, size_t size,
+				     size_t granule, void *cookie)
 {
-	struct mtk_iommu_data *data = cookie;
-	struct iommu_domain *domain = &data->m4u_dom->domain;
-
-	iommu_iotlb_gather_add_page(domain, gather, iova, granule);
 }
 
 static const struct iommu_flush_ops mtk_iommu_flush_ops = {
 	.tlb_flush_all = mtk_iommu_tlb_flush_all,
-	.tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
-	.tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync,
-	.tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
+	.tlb_flush_walk = mtk_iommu_tlb_flush_skip,
+	.tlb_flush_leaf = mtk_iommu_tlb_flush_skip,
 };
 
 static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
@@ -443,19 +437,6 @@ static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
 	mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
 }
 
-static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
-				 struct iommu_iotlb_gather *gather)
-{
-	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
-	size_t length = gather->end - gather->start;
-
-	if (gather->start == ULONG_MAX)
-		return;
-
-	mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
-				       data);
-}
-
 static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
 					  dma_addr_t iova)
 {
@@ -542,7 +523,6 @@ static const struct iommu_ops mtk_iommu_ops = {
 	.unmap		= mtk_iommu_unmap,
 	.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
 	.iotlb_sync_range = __mtk_iommu_tlb_flush_range_sync,
-	.iotlb_sync	= mtk_iommu_iotlb_sync,
 	.iova_to_phys	= mtk_iommu_iova_to_phys,
 	.probe_device	= mtk_iommu_probe_device,
 	.release_device	= mtk_iommu_release_device,
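
For reference, the ranged-sync path from [PATCH 2/4] that makes these hooks
redundant looks roughly like the sketch below (sketch only, reconstructed from
the context lines above; see that patch for the real code):

/* One ranged flush for the whole region, no per-page gathering. */
static void __mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size)
{
	/* Flush [iova, iova + size) in a single operation. */
	mtk_iommu_tlb_flush_range_sync(iova, size, 0, NULL);
}

static const struct iommu_ops mtk_iommu_ops = {
	/* ... */
	.iotlb_sync_range = __mtk_iommu_tlb_flush_range_sync,
	/* ... */
};

Because one flush already covers the whole unmapped range, the per-page
tlb_add_page() gather and the trailing iotlb_sync() have nothing left to do,
which is why this patch drops them.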
--
2.18.0