lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <1490930665-9696-5-git-send-email-thunder.leizhen@huawei.com>
Date:   Fri, 31 Mar 2017 11:24:22 +0800
From:   Zhen Lei <thunder.leizhen@...wei.com>
To:     Joerg Roedel <joro@...tes.org>,
        iommu <iommu@...ts.linux-foundation.org>,
        Robin Murphy <robin.murphy@....com>,
        David Woodhouse <dwmw2@...radead.org>,
        Sudeep Dutt <sudeep.dutt@...el.com>,
        Ashutosh Dixit <ashutosh.dixit@...el.com>,
        linux-kernel <linux-kernel@...r.kernel.org>
CC:     Zefan Li <lizefan@...wei.com>, Xinwei Hu <huxinwei@...wei.com>,
        "Tianhong Ding" <dingtianhong@...wei.com>,
        Hanjun Guo <guohanjun@...wei.com>,
        Zhen Lei <thunder.leizhen@...wei.com>
Subject: [PATCH v2 4/7] iommu/iova: adjust __cached_rbnode_insert_update

For cases 2 and 3 below, move cached32_node to the new place; for case 1,
leave it unchanged.

For example:
case 1: (the rightmost part was allocated)
	|------------------------------|
	|<-----free---->|<--new_iova-->|
	|
	|
   cached32_node

case 2: (the whole range was allocated)
	|------------------------------|
	|<---------new_iova----------->|
	|
	|
   cached32_node

case 3:
	|-----------------------|......|---------|
        |..free..|<--new_iova-->|<not enough iova space>
        |                              |
        |                              |
   cached32_node(new)             cached32_node(old)

Signed-off-by: Zhen Lei <thunder.leizhen@...wei.com>
---
 drivers/iommu/iova.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b5a148e..87a9332 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -91,12 +91,16 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
 }
 
 static void
-__cached_rbnode_insert_update(struct iova_domain *iovad,
-	unsigned long limit_pfn, struct iova *new)
+__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
 {
-	if (limit_pfn != iovad->dma_32bit_pfn)
+	struct iova *cached_iova;
+
+	if (new->pfn_hi > iovad->dma_32bit_pfn)
 		return;
-	iovad->cached32_node = &new->node;
+
+	cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
+	if (new->pfn_lo <= cached_iova->pfn_lo)
+		iovad->cached32_node = rb_prev(&new->node);
 }
 
 static void
@@ -131,12 +135,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 {
 	struct rb_node *prev, *curr;
 	unsigned long flags;
-	unsigned long saved_pfn;
 	unsigned long pad_size = 0;
 
 	/* Walk the tree backwards */
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-	saved_pfn = limit_pfn;
 	curr = __get_cached_rbnode(iovad, &limit_pfn);
 	prev = curr;
 	while (curr) {
@@ -197,11 +199,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 		rb_link_node(&new->node, parent, entry);
 		rb_insert_color(&new->node, &iovad->rbroot);
 	}
-	__cached_rbnode_insert_update(iovad, saved_pfn, new);
+	__cached_rbnode_insert_update(iovad, new);
 
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 
-
 	return 0;
 }
 
-- 
2.5.0


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ