Date:   Mon,  4 May 2020 16:58:05 +0200
From:   Alexandre Chartre <alexandre.chartre@...cle.com>
To:     tglx@...utronix.de, mingo@...hat.com, bp@...en8.de, hpa@...or.com,
        dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
        x86@...nel.org, linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc:     pbonzini@...hat.com, konrad.wilk@...cle.com,
        jan.setjeeilers@...cle.com, liran.alon@...cle.com,
        junaids@...gle.com, graf@...zon.de, rppt@...ux.vnet.ibm.com,
        kuzuno@...il.com, mgross@...ux.intel.com,
        alexandre.chartre@...cle.com
Subject: [RFC v4][PATCH part-2 08/13] mm/dpt: Keep track of VA ranges mapped in a decorated page-table

Add functions to keep track of VA ranges mapped in a decorated page-table.
This will be used when unmapping, to ensure that the same range is
unmapped at the same page-table level. It will also be used to handle
the mapping and unmapping of overlapping VA ranges.

Signed-off-by: Alexandre Chartre <alexandre.chartre@...cle.com>
---
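
For illustration, a minimal sketch of the expected map-side usage. It
only calls dpt_map_range() as added by this patch; PGT_LEVEL_PTE is
assumed to be the lowest value of enum page_table_level, and
example_map_buffer() is a hypothetical caller, not part of this series.

	static int example_map_buffer(struct dpt *dpt, void *buf, size_t size)
	{
		int err;

		/* Map [buf, buf + size) at PTE granularity. */
		err = dpt_map_range(dpt, buf, size, PGT_LEVEL_PTE);
		if (err == -EBUSY) {
			/* A range starting at buf is already tracked. */
			return 0;
		}

		return err;
	}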
 arch/x86/include/asm/dpt.h | 12 ++++++++
 arch/x86/mm/dpt.c          | 55 +++++++++++++++++++++++++++++++++++
 2 files changed, 67 insertions(+)
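
Similarly, a sketch of how an unmap path is expected to consume a
recorded range. The actual dpt_unmap_range() is not part of this patch,
so this is an assumption about the follow-up; it relies on
dpt_get_range_mapping() from the first dpt.c hunk below (and would
therefore live in dpt.c), and clearing the page-table entries
themselves is elided.

	static void example_unmap_range(struct dpt *dpt, void *ptr)
	{
		struct dpt_range_mapping *range;
		unsigned long flags;

		spin_lock_irqsave(&dpt->lock, flags);

		range = dpt_get_range_mapping(dpt, ptr);
		if (range) {
			/*
			 * Unmap range->size bytes at range->level here
			 * (elided), then drop the tracking entry.
			 */
			list_del(&range->list);
			kfree(range);
		}

		spin_unlock_irqrestore(&dpt->lock, flags);
	}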

diff --git a/arch/x86/include/asm/dpt.h b/arch/x86/include/asm/dpt.h
index 5a38d97a70a8..0d74afb10141 100644
--- a/arch/x86/include/asm/dpt.h
+++ b/arch/x86/include/asm/dpt.h
@@ -16,6 +16,17 @@ enum page_table_level {
 	PGT_LEVEL_PGD
 };
 
+/*
+ * Structure to keep track of address ranges mapped into a decorated
+ * page-table.
+ */
+struct dpt_range_mapping {
+	struct list_head list;
+	void *ptr;			/* range start address */
+	size_t size;			/* range size */
+	enum page_table_level level;	/* mapping level */
+};
+
 /*
  * A decorated page-table (dpt) encapsulates a native page-table (e.g.
  * a PGD) and maintains additional attributes related to this page-table.
@@ -24,6 +35,7 @@ struct dpt {
 	spinlock_t		lock;		/* protect all attributes */
 	pgd_t			*pagetable;	/* the actual page-table */
 	unsigned int		alignment;	/* page-table alignment */
+	struct list_head	mapping_list;	/* list of VA range mappings */
 
 	/*
 	 * A page-table can have direct references to another page-table,
diff --git a/arch/x86/mm/dpt.c b/arch/x86/mm/dpt.c
index 0e725344b921..12eb0d794d84 100644
--- a/arch/x86/mm/dpt.c
+++ b/arch/x86/mm/dpt.c
@@ -59,6 +59,24 @@ static int dpt_add_backend_page(struct dpt *dpt, void *addr,
 	return 0;
 }
 
+/*
+ * Return the range mapping starting at the specified address, or NULL if
+ * no such range is found.
+ */
+static struct dpt_range_mapping *dpt_get_range_mapping(struct dpt *dpt,
+						       void *ptr)
+{
+	struct dpt_range_mapping *range;
+
+	lockdep_assert_held(&dpt->lock);
+	list_for_each_entry(range, &dpt->mapping_list, list) {
+		if (range->ptr == ptr)
+			return range;
+	}
+
+	return NULL;
+}
+
 /*
  * Check if an offset in the page-table is valid, i.e. check that the
  * offset is on a page effectively belonging to the page-table.
@@ -563,6 +581,7 @@ static int dpt_copy_pgd_range(struct dpt *dpt,
 int dpt_map_range(struct dpt *dpt, void *ptr, size_t size,
 		  enum page_table_level level)
 {
+	struct dpt_range_mapping *range_mapping;
 	unsigned long addr = (unsigned long)ptr;
 	unsigned long end = addr + ((unsigned long)size);
 	unsigned long flags;
@@ -571,8 +590,36 @@ int dpt_map_range(struct dpt *dpt, void *ptr, size_t size,
 	pr_debug("DPT %p: MAP %px/%lx/%d\n", dpt, ptr, size, level);
 
 	spin_lock_irqsave(&dpt->lock, flags);
+
+	/* check if the range is already mapped */
+	range_mapping = dpt_get_range_mapping(dpt, ptr);
+	if (range_mapping) {
+		pr_debug("DPT %p: MAP %px/%lx/%d already mapped\n",
+			 dpt, ptr, size, level);
+		err = -EBUSY;
+		goto done;
+	}
+
+	/* map new range (GFP_ATOMIC: dpt->lock is held with IRQs disabled) */
+	range_mapping = kmalloc(sizeof(*range_mapping), GFP_ATOMIC);
+	if (!range_mapping) {
+		err = -ENOMEM;
+		goto done;
+	}
+
 	err = dpt_copy_pgd_range(dpt, dpt->pagetable, current->mm->pgd,
 				 addr, end, level);
+	if (err) {
+		kfree(range_mapping);
+		goto done;
+	}
+
+	INIT_LIST_HEAD(&range_mapping->list);
+	range_mapping->ptr = ptr;
+	range_mapping->size = size;
+	range_mapping->level = level;
+	list_add(&range_mapping->list, &dpt->mapping_list);
+done:
 	spin_unlock_irqrestore(&dpt->lock, flags);
 
 	return err;
@@ -611,6 +658,8 @@ struct dpt *dpt_create(unsigned int pgt_alignment)
 	if (!dpt)
 		return NULL;
 
+	INIT_LIST_HEAD(&dpt->mapping_list);
+
 	pagetable = (unsigned long)__get_free_pages(GFP_KERNEL_ACCOUNT |
 						    __GFP_ZERO,
 						    alloc_order);
@@ -632,6 +681,7 @@ void dpt_destroy(struct dpt *dpt)
 {
 	unsigned int pgt_alignment;
 	unsigned int alloc_order;
+	struct dpt_range_mapping *range, *range_next;
 	unsigned long index;
 	void *entry;
 
@@ -643,6 +693,11 @@ void dpt_destroy(struct dpt *dpt)
 			free_page((unsigned long)DPT_BACKEND_PAGE_ADDR(entry));
 	}
 
+	list_for_each_entry_safe(range, range_next, &dpt->mapping_list, list) {
+		list_del(&range->list);
+		kfree(range);
+	}
+
 	if (dpt->pagetable) {
 		pgt_alignment = dpt->alignment;
 		alloc_order = round_up(PAGE_SIZE + pgt_alignment,
-- 
2.18.2
