Message-Id: <1557758315-12667-18-git-send-email-alexandre.chartre@oracle.com>
Date:   Mon, 13 May 2019 16:38:25 +0200
From:   Alexandre Chartre <alexandre.chartre@...cle.com>
To:     pbonzini@...hat.com, rkrcmar@...hat.com, tglx@...utronix.de,
        mingo@...hat.com, bp@...en8.de, hpa@...or.com,
        dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
        kvm@...r.kernel.org, x86@...nel.org, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org
Cc:     konrad.wilk@...cle.com, jan.setjeeilers@...cle.com,
        liran.alon@...cle.com, jwadams@...gle.com,
        alexandre.chartre@...cle.com
Subject: [RFC KVM 17/27] kvm/isolation: improve mapping copy when mapping is already present

A mapping can already exist if a buffer was mapped in the KVM
address space and then freed without a corresponding request to
unmap it from the KVM address space. In that case, clear the
existing mapping before mapping the new buffer.

Also, if the new mapping is a subset of a larger range that is
already mapped, remap the entire larger range.

Signed-off-by: Alexandre Chartre <alexandre.chartre@...cle.com>
---
 arch/x86/kvm/isolation.c |   67 +++++++++++++++++++++++++++++++++++++++++++---
 1 files changed, 63 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/isolation.c b/arch/x86/kvm/isolation.c
index e494a15..539e287 100644
--- a/arch/x86/kvm/isolation.c
+++ b/arch/x86/kvm/isolation.c
@@ -88,6 +88,9 @@ struct mm_struct kvm_mm = {
 DEFINE_STATIC_KEY_FALSE(kvm_isolation_enabled);
 EXPORT_SYMBOL(kvm_isolation_enabled);
 
+static void kvm_clear_mapping(void *ptr, size_t size,
+			      enum page_table_level level);
+
 /*
  * When set to true, KVM #VMExit handlers run in isolated address space
  * which maps only KVM required code and per-VM information instead of
@@ -721,6 +724,7 @@ static int kvm_copy_mapping(void *ptr, size_t size, enum page_table_level level)
 {
 	unsigned long addr = (unsigned long)ptr;
 	unsigned long end = addr + ((unsigned long)size);
+	unsigned long range_addr, range_end;
 	struct kvm_range_mapping *range_mapping;
 	bool subset;
 	int err;
@@ -728,22 +732,77 @@ static int kvm_copy_mapping(void *ptr, size_t size, enum page_table_level level)
 	BUG_ON(current->mm == &kvm_mm);
 	pr_debug("KERNMAP COPY addr=%px size=%lx level=%d\n", ptr, size, level);
 
-	range_mapping = kmalloc(sizeof(struct kvm_range_mapping), GFP_KERNEL);
-	if (!range_mapping)
-		return -ENOMEM;
+	mutex_lock(&kvm_range_mapping_lock);
+
+	/*
+	 * A mapping can already exist if the buffer was mapped and then
+	 * freed but there was no request to unmap it. We might also be
+	 * trying to map a subset of an already mapped buffer.
+	 */
+	range_mapping = kvm_get_range_mapping_locked(ptr, &subset);
+	if (range_mapping) {
+		if (subset) {
+			pr_debug("range %px/%lx/%d is a subset of %px/%lx/%d already mapped, remapping\n",
+				 ptr, size, level, range_mapping->ptr,
+				 range_mapping->size, range_mapping->level);
+			range_addr = (unsigned long)range_mapping->ptr;
+			range_end = range_addr +
+				((unsigned long)range_mapping->size);
+			err = kvm_copy_pgd_range(&kvm_mm, current->mm,
+						 range_addr, range_end,
+						 range_mapping->level);
+			if (end <= range_end) {
+				/*
+				 * We effectively have a subset, fully contained
+				 * in the superset. So we are done.
+				 */
+				mutex_unlock(&kvm_range_mapping_lock);
+				return err;
+			}
+			/*
+			 * The new range is larger than the existing mapped
+			 * range. So we need an extra mapping to map the end
+			 * of the range.
+			 */
+			addr = range_end;
+			range_mapping = NULL;
+			pr_debug("adding extra range %lx-%lx (%d)\n", addr,
+				 end, level);
+		} else {
+			pr_debug("range %px size=%lx level=%d already mapped, clearing\n",
+				 range_mapping->ptr, range_mapping->size,
+				 range_mapping->level);
+			kvm_clear_mapping(range_mapping->ptr,
+					  range_mapping->size,
+					  range_mapping->level);
+			list_del(&range_mapping->list);
+		}
+	}
+
+	if (!range_mapping) {
+		range_mapping = kmalloc(sizeof(struct kvm_range_mapping),
+		    GFP_KERNEL);
+		if (!range_mapping) {
+			mutex_unlock(&kvm_range_mapping_lock);
+			return -ENOMEM;
+		}
+		INIT_LIST_HEAD(&range_mapping->list);
+	}
 
 	err = kvm_copy_pgd_range(&kvm_mm, current->mm, addr, end, level);
 	if (err) {
+		mutex_unlock(&kvm_range_mapping_lock);
 		kfree(range_mapping);
 		return err;
 	}
 
-	INIT_LIST_HEAD(&range_mapping->list);
 	range_mapping->ptr = ptr;
 	range_mapping->size = size;
 	range_mapping->level = level;
 	list_add(&range_mapping->list, &kvm_range_mapping_list);
 
+	mutex_unlock(&kvm_range_mapping_lock);
+
 	return 0;
 }
 
-- 
1.7.1
