lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20160803182308.19227-1-yinghai@kernel.org>
Date:	Wed,  3 Aug 2016 11:23:08 -0700
From:	Yinghai Lu <yinghai@...nel.org>
To:	Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...hat.com>,
	"H . Peter Anvin" <hpa@...or.com>,
	Kees Cook <keescook@...omium.org>,
	"Rafael J . Wysocki" <rjw@...ysocki.net>,
	Pavel Machek <pavel@....cz>
Cc:	the arch/x86 maintainers <x86@...nel.org>,
	Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
	Linux PM list <linux-pm@...r.kernel.org>,
	kernel-hardening@...ts.openwall.com,
	Thomas Garnier <thgarnie@...gle.com>,
	Yinghai Lu <yinghai@...nel.org>
Subject: [PATCH v2] x86/power/64: Support unaligned addresses for temporary mapping

From: Thomas Garnier <thgarnie@...gle.com>

Correctly set up the temporary mapping for hibernation. The previous
implementation assumed the offset between KVA and PA was aligned on the
PGD level. With KASLR memory randomization enabled, the offset is
randomized on the PUD level. This change supports offsets that are
unaligned down to the PMD level.

Signed-off-by: Thomas Garnier <thgarnie@...gle.com>
[yinghai: change loop to virtual address]
Signed-off-by: Yinghai Lu <yinghai@...nel.org>
---
 arch/x86/mm/ident_map.c |   54 ++++++++++++++++++++++++++++--------------------
 1 file changed, 32 insertions(+), 22 deletions(-)

Index: linux-2.6/arch/x86/mm/ident_map.c
===================================================================
--- linux-2.6.orig/arch/x86/mm/ident_map.c
+++ linux-2.6/arch/x86/mm/ident_map.c
@@ -3,40 +3,47 @@
  * included by both the compressed kernel and the regular kernel.
  */
 
-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
 			   unsigned long addr, unsigned long end)
 {
-	addr &= PMD_MASK;
-	for (; addr < end; addr += PMD_SIZE) {
-		pmd_t *pmd = pmd_page + pmd_index(addr);
+	unsigned long off = info->kernel_mapping ? __PAGE_OFFSET : 0;
+	unsigned long vaddr = addr + off;
+	unsigned long vend = end + off;
+
+	vaddr &= PMD_MASK;
+	for (; vaddr < vend; vaddr += PMD_SIZE) {
+		pmd_t *pmd = pmd_page + pmd_index(vaddr);
 
 		if (!pmd_present(*pmd))
-			set_pmd(pmd, __pmd(addr | pmd_flag));
+			set_pmd(pmd, __pmd((vaddr - off) | info->pmd_flag));
 	}
 }
 
 static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 			  unsigned long addr, unsigned long end)
 {
-	unsigned long next;
+	unsigned long off = info->kernel_mapping ? __PAGE_OFFSET : 0;
+	unsigned long vaddr = addr + off;
+	unsigned long vend = end + off;
+	unsigned long vnext;
 
-	for (; addr < end; addr = next) {
-		pud_t *pud = pud_page + pud_index(addr);
+	for (; vaddr < vend; vaddr = vnext) {
+		pud_t *pud = pud_page + pud_index(vaddr);
 		pmd_t *pmd;
 
-		next = (addr & PUD_MASK) + PUD_SIZE;
-		if (next > end)
-			next = end;
+		vnext = (vaddr & PUD_MASK) + PUD_SIZE;
+		if (vnext > vend)
+			vnext = vend;
 
 		if (pud_present(*pud)) {
 			pmd = pmd_offset(pud, 0);
-			ident_pmd_init(info->pmd_flag, pmd, addr, next);
+			ident_pmd_init(info, pmd, vaddr - off, vnext - off);
 			continue;
 		}
 		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
 		if (!pmd)
 			return -ENOMEM;
-		ident_pmd_init(info->pmd_flag, pmd, addr, next);
+		ident_pmd_init(info, pmd, vaddr - off, vnext - off);
 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
 	}
 
@@ -46,21 +53,24 @@ static int ident_pud_init(struct x86_map
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
 			      unsigned long addr, unsigned long end)
 {
-	unsigned long next;
 	int result;
-	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
+	unsigned long off = info->kernel_mapping ? __PAGE_OFFSET : 0;
+	unsigned long vaddr = addr + off;
+	unsigned long vend = end + off;
+	unsigned long vnext;
 
-	for (; addr < end; addr = next) {
-		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
+	for (; vaddr < vend; vaddr = vnext) {
+		pgd_t *pgd = pgd_page + pgd_index(vaddr);
 		pud_t *pud;
 
-		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
-		if (next > end)
-			next = end;
+		vnext = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
+		if (vnext > vend)
+			vnext = vend;
 
 		if (pgd_present(*pgd)) {
 			pud = pud_offset(pgd, 0);
-			result = ident_pud_init(info, pud, addr, next);
+			result = ident_pud_init(info, pud, vaddr - off,
+						vnext - off);
 			if (result)
 				return result;
 			continue;
@@ -69,7 +79,7 @@ int kernel_ident_mapping_init(struct x86
 		pud = (pud_t *)info->alloc_pgt_page(info->context);
 		if (!pud)
 			return -ENOMEM;
-		result = ident_pud_init(info, pud, addr, next);
+		result = ident_pud_init(info, pud, vaddr - off, vnext - off);
 		if (result)
 			return result;
 		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ