Message-Id: <6c1b31e24b78b0366c7ca9fa6ac75bdb463d8fc0.1536685746.git.yu.c.chen@intel.com>
Date:   Wed, 12 Sep 2018 01:21:00 +0800
From:   Chen Yu <yu.c.chen@...el.com>
To:     Thomas Gleixner <tglx@...utronix.de>,
        "Rafael J. Wysocki" <rjw@...ysocki.net>
Cc:     Pavel Machek <pavel@....cz>, Rui Zhang <rui.zhang@...el.com>,
        Chen Yu <yu.chen.surf@...il.com>,
        Zhimin Gu <kookoo.gu@...el.com>,
        Len Brown <len.brown@...el.com>, linux-kernel@...r.kernel.org,
        x86@...nel.org, linux-pm@...r.kernel.org
Subject: [PATCH 4/4][v2] x86, hibernate: Backport several fixes from 64-bit to 32-bit hibernation

From: Zhimin Gu <kookoo.gu@...el.com>

There are currently three main bugs in 32-bit hibernation:
1. The page copy code does not run from a safe page, which might
   cause a hang during resume.
2. There is no text mapping for the final jump address of the
   original kernel, so the system might jump to an invalid address
   and hang during resume.
3. After all the pages have been copied back, the restore kernel
   switches to its own page table (swapper_pg_dir) rather than the
   original kernel's page table, which might leave invalid
   virtual-to-physical mappings during resume.

To solve these problems:

1. Copy core_restore_code to a safe page, so that the copy routine
   itself cannot be overwritten while the image kernel's pages are
   being copied (see the sketch after this list).
2. Set up a temporary text mapping for the image kernel's jump
   address, so that once all the pages have been copied back, the
   system can jump to that address.
3. Switch back to the original kernel's page table during resume.
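
Conceptually, step 1 reduces to the following minimal sketch. The
function name relocate_restore_code_sketch() is illustrative only;
get_safe_page() and core_restore_code[] are the interfaces the patch
actually uses, and the page-table fixup that makes the safe page
executable is omitted here:

	static int relocate_restore_code_sketch(void)
	{
		/* a page guaranteed not to collide with any image page */
		relocated_restore_code = get_safe_page(GFP_ATOMIC);
		if (!relocated_restore_code)
			return -ENOMEM;

		/* core_restore_code is the copy loop in hibernate_asm_32.S;
		 * restore_image() later jumps through
		 * relocated_restore_code instead of calling it in place.
		 */
		memcpy((void *)relocated_restore_code, core_restore_code,
		       PAGE_SIZE);
		return 0;
	}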

Furthermore, the MD5 hash check of the e820 memory map is also
backported from the 64-bit code, so that resume is refused if the
firmware-provided memory map changed between suspend and resume.
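
As a rough illustration of that check, the idea is to hash the e820
table at suspend, store the digest in the image header, and compare
at resume. In this sketch, e820_digest() is a hypothetical name, and
the error paths are trimmed relative to the 64-bit code's
get_e820_md5():

	static int e820_digest(struct e820_table *table, u8 *out)
	{
		struct crypto_shash *tfm;
		int size, ret = 0;

		tfm = crypto_alloc_shash("md5", 0, 0);
		if (IS_ERR(tfm))
			return -ENOMEM;

		{
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			/* hash the header plus all populated entries */
			size = offsetof(struct e820_table, entries) +
			       sizeof(struct e820_entry) * table->nr_entries;
			if (crypto_shash_digest(desc, (u8 *)table, size, out))
				ret = -EINVAL;
		}

		crypto_free_shash(tfm);
		return ret;
	}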

Acked-by: Chen Yu <yu.c.chen@...el.com>
Cc: "Rafael J. Wysocki" <rjw@...ysocki.net>
Cc: Thomas Gleixner <tglx@...utronix.de>
Signed-off-by: Zhimin Gu <kookoo.gu@...el.com>
---
 arch/x86/Kconfig                  |  2 +-
 arch/x86/include/asm/suspend_32.h |  4 +++
 arch/x86/power/hibernate.c        |  2 --
 arch/x86/power/hibernate_32.c     | 37 +++++++++++++++++++++++
 arch/x86/power/hibernate_asm_32.S | 49 +++++++++++++++++++++++++------
 5 files changed, 82 insertions(+), 12 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1a0be022f91d..e8de5de1057f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2422,7 +2422,7 @@ menu "Power management and ACPI options"
 
 config ARCH_HIBERNATION_HEADER
 	def_bool y
-	depends on X86_64 && HIBERNATION
+	depends on HIBERNATION
 
 source "kernel/power/Kconfig"
 
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index 8be6afb58471..fdbd9d7b7bca 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -32,4 +32,8 @@ struct saved_context {
 	unsigned long return_address;
 } __attribute__((packed));
 
+/* routines for saving/restoring kernel state */
+extern char core_restore_code[];
+extern char restore_registers[];
+
 #endif /* _ASM_X86_SUSPEND_32_H */
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index 6aeac4d3c9df..d719d156114b 100644
--- a/arch/x86/power/hibernate.c
+++ b/arch/x86/power/hibernate.c
@@ -69,7 +69,6 @@ int pfn_is_nosave(unsigned long pfn)
 	return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
 }
 
-#ifdef CONFIG_X86_64
 static int relocate_restore_code(void)
 {
 	pgd_t *pgd;
@@ -262,4 +261,3 @@ int arch_hibernation_header_restore(void *addr)
 
 	return 0;
 }
-#endif
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index e0e7b9aea22a..d692700047bf 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -135,6 +135,32 @@ static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
 #endif
 }
 
+static int set_up_temporary_text_mapping(pgd_t *pgd_base)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	pgd = pgd_base + pgd_index(restore_jump_address);
+
+	pmd = resume_one_md_table_init(pgd);
+	if (!pmd)
+		return -ENOMEM;
+
+	if (boot_cpu_has(X86_FEATURE_PSE)) {
+		set_pmd(pmd + pmd_index(restore_jump_address),
+		__pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
+	} else {
+		pte = resume_one_page_table_init(pmd);
+		if (!pte)
+			return -ENOMEM;
+		set_pte(pte + pte_index(restore_jump_address),
+		__pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
+	}
+
+	return 0;
+}
+
 asmlinkage int swsusp_arch_resume(void)
 {
 	int error;
@@ -144,10 +170,21 @@ asmlinkage int swsusp_arch_resume(void)
 		return -ENOMEM;
 
 	resume_init_first_level_page_table(resume_pg_dir);
+
+	error = set_up_temporary_text_mapping(resume_pg_dir);
+	if (error)
+		return error;
+
 	error = resume_physical_mapping_init(resume_pg_dir);
 	if (error)
 		return error;
 
+	temp_pgt = __pa(resume_pg_dir);
+
+	error = relocate_restore_code();
+	if (error)
+		return error;
+
 	/* We have got enough memory and from now on we cannot recover */
 	restore_image();
 	return 0;
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index 6e56815e13a0..a53b4a41e09a 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -24,21 +24,40 @@ ENTRY(swsusp_arch_suspend)
 	pushfl
 	popl saved_context_eflags
 
+	/* save cr3 */
+	movl	%cr3, %eax
+	movl	%eax, restore_cr3
+
 	call swsusp_save
 	ret
+ENDPROC(swsusp_arch_suspend)
 
 ENTRY(restore_image)
-	movl	mmu_cr4_features, %ecx
-	movl	resume_pg_dir, %eax
-	subl	$__PAGE_OFFSET, %eax
+	/* prepare to jump to the image kernel */
+	movl	restore_jump_address, %ebx
+	movl	restore_cr3, %ebp
+
+	movl	mmu_cr4_features, %edx
+
+	/* jump to relocated restore code */
+	movl	relocated_restore_code, %eax
+	jmpl	*%eax
+
+	/* code below has been relocated to a safe page */
+ENTRY(core_restore_code)
+	movl	temp_pgt, %eax
 	movl	%eax, %cr3
 
+	/* flush TLB */
+	movl	%edx, %ecx
 	jecxz	1f	# cr4 Pentium and higher, skip if zero
 	andl	$~(X86_CR4_PGE), %ecx
 	movl	%ecx, %cr4;  # turn off PGE
 	movl	%cr3, %eax;  # flush TLB
 	movl	%eax, %cr3
+	movl	%edx, %cr4;  # turn PGE back on
 1:
+	/* prepare to copy image data to their original locations */
 	movl	restore_pblist, %edx
 	.p2align 4,,7
 
@@ -49,7 +68,7 @@ copy_loop:
 	movl	pbe_address(%edx), %esi
 	movl	pbe_orig_address(%edx), %edi
 
-	movl	$1024, %ecx
+	movl	$(PAGE_SIZE >> 2), %ecx
 	rep
 	movsl
 
@@ -58,13 +77,22 @@ copy_loop:
 	.p2align 4,,7
 
 done:
+	jmpl	*%ebx
+	.align PAGE_SIZE
+
+ENTRY(restore_registers)
 	/* go back to the original page tables */
-	movl	$swapper_pg_dir, %eax
-	subl	$__PAGE_OFFSET, %eax
-	movl	%eax, %cr3
-	movl	mmu_cr4_features, %ecx
+	movl	%ebp, %cr3
+
+	/* flush TLB */
+	movl	mmu_cr4_features, %edx
+	movl	%edx, %ecx
 	jecxz	1f	# cr4 Pentium and higher, skip if zero
-	movl	%ecx, %cr4;  # turn PGE back on
+	andl	$~(X86_CR4_PGE), %ecx
+	movl	%ecx, %cr4;  # turn off PGE
+	movl	%cr3, %ecx;  # flush TLB
+	movl	%ecx, %cr3;
+	movl	%edx, %cr4;  # turn PGE back on
 1:
 
 	movl saved_context_esp, %esp
@@ -82,4 +110,7 @@ done:
 
 	xorl	%eax, %eax
 
+	movl	%eax, in_suspend
+
 	ret
+ENDPROC(restore_registers)
-- 
2.17.1
