Message-ID: <1490164067-12552-6-git-send-email-thunder.leizhen@huawei.com>
Date: Wed, 22 Mar 2017 14:27:45 +0800
From: Zhen Lei <thunder.leizhen@...wei.com>
To: Joerg Roedel <joro@...tes.org>,
iommu <iommu@...ts.linux-foundation.org>,
Robin Murphy <robin.murphy@....com>,
David Woodhouse <dwmw2@...radead.org>,
Sudeep Dutt <sudeep.dutt@...el.com>,
Ashutosh Dixit <ashutosh.dixit@...el.com>,
linux-kernel <linux-kernel@...r.kernel.org>
CC: Zefan Li <lizefan@...wei.com>, Xinwei Hu <huxinwei@...wei.com>,
"Tianhong Ding" <dingtianhong@...wei.com>,
Hanjun Guo <guohanjun@...wei.com>,
Zhen Lei <thunder.leizhen@...wei.com>
Subject: [PATCH 5/7] iommu/iova: optimize the allocation performance of dma64

Currently, the search for free dma64 iova space always begins at the
last node of the iovad rb-tree. In the worst case there may be a huge
number of nodes sitting at the tail, so the first loop in
__alloc_and_insert_iova_range has to traverse all of them. In our
trace of an iperf test case, it iterated more than 10K times.

__alloc_and_insert_iova_range:
	......
	curr = __get_cached_rbnode(iovad, &limit_pfn);
		//--> return rb_last(&iovad->rbroot);
	while (curr) {
		......
		curr = rb_prev(curr);
	}
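
The cost of that walk can be modelled outside the kernel. The toy
user-space program below (illustrative only; a sorted array stands in
for the rb-tree and the backwards scan for rb_prev()) shows how
allocations packed at the tail force one step per allocated range
before a free pfn is found:

#include <stdio.h>

#define NR_ALLOCATED	10000

int main(void)
{
	/* pfn_lo of already-allocated, size-1 ranges packed at the tail */
	static unsigned long long pfn_lo[NR_ALLOCATED];
	unsigned long long limit_pfn = (1ULL << 40) - 1;	/* a dma64 limit */
	unsigned long steps = 0;
	long i;

	for (i = 0; i < NR_ALLOCATED; i++)
		pfn_lo[i] = limit_pfn - NR_ALLOCATED + i;

	/* walk from the last entry towards lower pfns, like rb_prev() */
	for (i = NR_ALLOCATED - 1; i >= 0; i--) {
		steps++;
		/* stop at the first gap below this entry */
		if (i == 0 || pfn_lo[i] - 1 > pfn_lo[i - 1])
			break;
	}

	printf("steps before a free pfn was found: %lu\n", steps);
	return 0;
}

With NR_ALLOCATED set to 10000 it prints 10000 steps, which matches the
order of magnitude seen in the trace above.
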
So add cached64_node, which serves the same purpose for dma64 that
cached32_node serves for dma32, and reserve a boundary at the start
pfn of the dma64 range so that no iova can cross both the dma32 and
dma64 areas.

|-------------------|------------------------------|
|<--cached32_node-->|<--------cached64_node------->|
|                   |
start_pfn           dma_32bit_pfn + 1
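
Put differently, after this patch the starting point of a search is
chosen by a single comparison of limit_pfn against dma_32bit_pfn. A
minimal sketch of that selection, using a hypothetical helper name
(the real logic is in __get_cached_rbnode below):

#include <linux/iova.h>

/* Sketch only: choose the cache pointer to start searching from.
 * The boundary iovas reserved at start_pfn and dma_32bit_pfn + 1
 * guarantee that neither pointer can be NULL.
 */
static struct rb_node **pick_cached_node(struct iova_domain *iovad,
					 unsigned long limit_pfn)
{
	if (limit_pfn <= iovad->dma_32bit_pfn)
		return &iovad->cached32_node;	/* dma32 allocation */

	return &iovad->cached64_node;		/* dma64 allocation */
}
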
Signed-off-by: Zhen Lei <thunder.leizhen@...wei.com>
---
 drivers/iommu/iova.c | 46 +++++++++++++++++++++++++++-------------------
 include/linux/iova.h |  5 +++--
 2 files changed, 30 insertions(+), 21 deletions(-)

diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 87a9332..23abe84 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -37,10 +37,15 @@ insert_iova_boundary(struct iova_domain *iovad)
 {
 	struct iova *iova;
 	unsigned long start_pfn_32bit = iovad->start_pfn;
+	unsigned long start_pfn_64bit = iovad->dma_32bit_pfn + 1;
 
 	iova = reserve_iova(iovad, start_pfn_32bit, start_pfn_32bit);
 	BUG_ON(!iova);
 	iovad->cached32_node = &iova->node;
+
+	iova = reserve_iova(iovad, start_pfn_64bit, start_pfn_64bit);
+	BUG_ON(!iova);
+	iovad->cached64_node = &iova->node;
 }
 
 void
@@ -62,8 +67,8 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	init_iova_rcaches(iovad);
 
 	/*
-	 * Insert boundary nodes for dma32. So cached32_node can not be NULL in
-	 * future.
+	 * Insert boundary nodes for dma32 and dma64. So cached32_node and
+	 * cached64_node can not be NULL in future.
 	 */
 	insert_iova_boundary(iovad);
 }
@@ -75,10 +80,10 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
 	struct rb_node *cached_node;
 	struct rb_node *next_node;
 
-	if (*limit_pfn > iovad->dma_32bit_pfn)
-		return rb_last(&iovad->rbroot);
-	else
+	if (*limit_pfn <= iovad->dma_32bit_pfn)
 		cached_node = iovad->cached32_node;
+	else
+		cached_node = iovad->cached64_node;
 
 	next_node = rb_next(cached_node);
 	if (next_node) {
@@ -94,29 +99,32 @@ static void
 __cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
 {
 	struct iova *cached_iova;
+	struct rb_node **cached_node;
 
-	if (new->pfn_hi > iovad->dma_32bit_pfn)
-		return;
+	if (new->pfn_hi <= iovad->dma_32bit_pfn)
+		cached_node = &iovad->cached32_node;
+	else
+		cached_node = &iovad->cached64_node;
 
-	cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
+	cached_iova = rb_entry(*cached_node, struct iova, node);
 	if (new->pfn_lo <= cached_iova->pfn_lo)
-		iovad->cached32_node = rb_prev(&new->node);
+		*cached_node = rb_prev(&new->node);
 }
 
 static void
 __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 {
 	struct iova *cached_iova;
-	struct rb_node *curr;
+	struct rb_node **cached_node;
 
-	curr = iovad->cached32_node;
-	cached_iova = rb_entry(curr, struct iova, node);
+	if (free->pfn_hi <= iovad->dma_32bit_pfn)
+		cached_node = &iovad->cached32_node;
+	else
+		cached_node = &iovad->cached64_node;
 
-	if (free->pfn_lo >= cached_iova->pfn_lo) {
-		/* only cache if it's below 32bit pfn */
-		if (free->pfn_hi <= iovad->dma_32bit_pfn)
-			iovad->cached32_node = rb_prev(&free->node);
-	}
+	cached_iova = rb_entry(*cached_node, struct iova, node);
+	if (free->pfn_lo >= cached_iova->pfn_lo)
+		*cached_node = rb_prev(&free->node);
 }
 
 /*
@@ -283,7 +291,7 @@ EXPORT_SYMBOL_GPL(iova_cache_put);
  * alloc_iova - allocates an iova
  * @iovad: - iova domain in question
  * @size: - size of page frames to allocate
- * @limit_pfn: - max limit address
+ * @limit_pfn: - max limit address(included)
  * @size_aligned: - set if size_aligned address range is required
  * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
  * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
@@ -402,7 +410,7 @@ EXPORT_SYMBOL_GPL(free_iova);
  * alloc_iova_fast - allocates an iova from rcache
  * @iovad: - iova domain in question
  * @size: - size of page frames to allocate
- * @limit_pfn: - max limit address
+ * @limit_pfn: - max limit address(included)
  * This function tries to satisfy an iova allocation from the rcache,
  * and falls back to regular allocation on failure.
  */
diff --git a/include/linux/iova.h b/include/linux/iova.h
index f27bb2c..844d723 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -40,10 +40,11 @@ struct iova_rcache {
 struct iova_domain {
 	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
 	struct rb_root	rbroot;		/* iova domain rbtree root */
-	struct rb_node	*cached32_node; /* Save last alloced node */
+	struct rb_node	*cached32_node; /* Save last alloced node, 32bits */
+	struct rb_node	*cached64_node; /* Save last alloced node, 64bits */
 	unsigned long	granule;	/* pfn granularity for this domain */
 	unsigned long	start_pfn;	/* Lower limit for this domain */
-	unsigned long	dma_32bit_pfn;
+	unsigned long	dma_32bit_pfn;	/* max dma32 limit address(included) */
 	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */
 };
 
--
2.5.0