lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Mon, 13 May 2019 16:38:17 +0200
From:   Alexandre Chartre <alexandre.chartre@...cle.com>
To:     pbonzini@...hat.com, rkrcmar@...hat.com, tglx@...utronix.de,
        mingo@...hat.com, bp@...en8.de, hpa@...or.com,
        dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
        kvm@...r.kernel.org, x86@...nel.org, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org
Cc:     konrad.wilk@...cle.com, jan.setjeeilers@...cle.com,
        liran.alon@...cle.com, jwadams@...gle.com,
        alexandre.chartre@...cle.com
Subject: [RFC KVM 09/27] kvm/isolation: function to track buffers allocated for the KVM page table

The KVM page table will have direct references to the kernel page table,
at different levels (PGD, P4D, PUD, PMD). When freeing the KVM page table,
we should make sure that we free parts actually allocated for the KVM
page table, and not parts of the kernel page table referenced from the
KVM page table. To do so, we will keep track of buffers when building
the KVM page table.

Signed-off-by: Alexandre Chartre <alexandre.chartre@...cle.com>
---
 arch/x86/kvm/isolation.c |  119 ++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 119 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kvm/isolation.c b/arch/x86/kvm/isolation.c
index 43fd924..1efdab1 100644
--- a/arch/x86/kvm/isolation.c
+++ b/arch/x86/kvm/isolation.c
@@ -8,12 +8,60 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/printk.h>
+#include <linux/slab.h>
 
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 
 #include "isolation.h"
 
+
+/*
+ * Page table levels at which a buffer can be allocated for the KVM page
+ * table, from smallest (PTE) to largest (PGD).
+ */
+enum page_table_level {
+	PGT_LEVEL_PTE,
+	PGT_LEVEL_PMD,
+	PGT_LEVEL_PUD,
+	PGT_LEVEL_P4D,
+	PGT_LEVEL_PGD
+};
+
+/*
+ * The KVM page table can have direct references to the kernel page table,
+ * at different levels (PGD, P4D, PUD, PMD). When freeing the KVM page
+ * table, we should make sure that we free parts actually allocated for
+ * the KVM page table, and not parts of the kernel page table referenced
+ * from the KVM page table.
+ *
+ * To do so, page table directories (struct pgt_directory) are used to keep
+ * track of buffers allocated when building the KVM page table. Also, as
+ * a page table can have many buffers, page table directory groups (struct
+ * pgt_directory_group) are used to group page table directories and save
+ * some space (instead of allocating each directory individually).
+ */
+
+/* Number of pgt_directory entries batched into one group allocation. */
+#define PGT_DIRECTORY_GROUP_SIZE	64
+
+/*
+ * One tracked page-table buffer: the level it was allocated for and its
+ * page-aligned address (see PGTD_ALIGN).
+ */
+struct pgt_directory {
+	enum page_table_level level;
+	void *ptr;
+};
+
+/*
+ * A fixed-size batch of tracked buffers. Groups are chained on
+ * kvm_pgt_dgroup_list; only the first 'count' directory[] slots are valid.
+ */
+struct pgt_directory_group {
+	struct list_head list;
+	int count;
+	struct pgt_directory directory[PGT_DIRECTORY_GROUP_SIZE];
+};
+
+/* All directory groups; both list and group contents are protected by
+ * kvm_pgt_dgroup_lock. */
+static LIST_HEAD(kvm_pgt_dgroup_list);
+static DEFINE_MUTEX(kvm_pgt_dgroup_lock);
+
+/*
+ * Get the pointer to the beginning of a page table directory from a page
+ * table directory entry.
+ */
+#define PGTD_ALIGN(entry)	\
+	((typeof(entry))(((unsigned long)(entry)) & PAGE_MASK))
+
+
 struct mm_struct kvm_mm = {
 	.mm_rb			= RB_ROOT,
 	.mm_users		= ATOMIC_INIT(2),
@@ -43,6 +91,77 @@ struct mm_struct kvm_mm = {
 static bool __read_mostly address_space_isolation;
 module_param(address_space_isolation, bool, 0444);
 
+
+/*
+ * Allocate an empty directory group. Returns NULL on allocation failure.
+ * Caller is responsible for linking the group onto kvm_pgt_dgroup_list
+ * (under kvm_pgt_dgroup_lock) and, eventually, for freeing it.
+ */
+static struct pgt_directory_group *pgt_directory_group_create(void)
+{
+	struct pgt_directory_group *dgroup;
+
+	dgroup = kzalloc(sizeof(struct pgt_directory_group), GFP_KERNEL);
+	if (!dgroup)
+		return NULL;
+
+	/*
+	 * NOTE(review): kzalloc() already zeroes the structure, so
+	 * 'count = 0' is redundant, and 'list' is re-initialized by the
+	 * list_add*() the caller performs — both could be dropped.
+	 */
+	INIT_LIST_HEAD(&dgroup->list);
+	dgroup->count = 0;
+
+	return dgroup;
+}
+
+/*
+ * Record a buffer allocated for the KVM page table at the given level, so
+ * that kvm_valid_pgt_entry() can later distinguish it from kernel page
+ * table buffers merely referenced from the KVM page table.
+ *
+ * The recorded pointer is page-aligned with PGTD_ALIGN, so any entry
+ * address within the buffer may be passed.
+ *
+ * Returns true on success, false if a new directory group could not be
+ * allocated.
+ */
+static bool kvm_add_pgt_directory(void *ptr, enum page_table_level level)
+{
+	struct pgt_directory_group *dgroup;
+	int index;
+
+	mutex_lock(&kvm_pgt_dgroup_lock);
+
+	/* Entries are always inserted into the group at the list head. */
+	if (list_empty(&kvm_pgt_dgroup_list))
+		dgroup = NULL;
+	else
+		dgroup = list_entry(kvm_pgt_dgroup_list.next,
+				    struct pgt_directory_group, list);
+
+	if (!dgroup || dgroup->count >= PGT_DIRECTORY_GROUP_SIZE) {
+		dgroup = pgt_directory_group_create();
+		if (!dgroup) {
+			mutex_unlock(&kvm_pgt_dgroup_lock);
+			return false;
+		}
+		/*
+		 * Add the new group at the head of the list so that it is
+		 * the group found by the lookup above. With list_add_tail()
+		 * the full group would stay at the head, and every later
+		 * call would allocate yet another group holding a single
+		 * entry, defeating the grouping.
+		 */
+		list_add(&dgroup->list, &kvm_pgt_dgroup_list);
+	}
+
+	index = dgroup->count;
+	dgroup->directory[index].level = level;
+	dgroup->directory[index].ptr = PGTD_ALIGN(ptr);
+	dgroup->count = index + 1;
+
+	mutex_unlock(&kvm_pgt_dgroup_lock);
+
+	return true;
+}
+
+/*
+ * Check whether @ptr falls in a buffer previously recorded with
+ * kvm_add_pgt_directory(), i.e. a buffer actually allocated for the KVM
+ * page table (as opposed to a kernel page table buffer referenced from
+ * it). Any entry address within the buffer may be passed; it is aligned
+ * with PGTD_ALIGN before the lookup.
+ */
+static bool kvm_valid_pgt_entry(void *ptr)
+{
+	struct pgt_directory_group *dgroup;
+	bool found = false;
+	int i;
+
+	ptr = PGTD_ALIGN(ptr);
+
+	mutex_lock(&kvm_pgt_dgroup_lock);
+	list_for_each_entry(dgroup, &kvm_pgt_dgroup_list, list) {
+		for (i = 0; i < dgroup->count; i++) {
+			if (dgroup->directory[i].ptr == ptr) {
+				found = true;
+				break;
+			}
+		}
+		if (found)
+			break;
+	}
+	mutex_unlock(&kvm_pgt_dgroup_lock);
+
+	return found;
+}
+
+
 static int kvm_isolation_init_mm(void)
 {
 	pgd_t *kvm_pgd;
-- 
1.7.1

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ