lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Date:	Tue, 3 Feb 2015 23:30:12 +0800
From:	Baoquan He <bhe@...hat.com>
To:	"H. Peter Anvin" <hpa@...or.com>
Cc:	linux-kernel@...r.kernel.org, tglx@...utronix.de, mingo@...hat.com,
	x86@...nel.org, keescook@...omium.org, vgoyal@...hat.com,
	whissi@...ssi.de
Subject: Re: [PATCH 0/6] randomize kernel physical address and virtual
 address separately

On 02/02/15 at 08:42am, H. Peter Anvin wrote:
> On 01/20/2015 07:37 PM, Baoquan He wrote:
> >
> >Leftover problem:
> >     hpa want to see the physical randomization can cover the whole physical memory. I
> >checked code and found it's hard to do. Because in arch/x86/boot/compressed/head_64.S
> >an identity mapping of 4G is built and then kaslr and decompressing are done. The #PF
> >handler solution which he suggested is only available after jump into decompressed
> >kernel, namely in arch/x86/kernel/head_64.S. I didn't think of a way to do the whole
> >memory covering for physical address randomization, any suggestion or idea?
> >
> 
> Basically, it means adding an IDT and #PF handler to the
> decompression code.  Not really all that complex.

Hi hpa,

Thanks for suggestion.

Now I am working on this approach. Andy provided a patch which adds an IDT
in the boot/compressed load stage and only prints the page fault address.
I applied this patch and implemented the #PF handler function, and
it successfully builds the identity mapping when a page fault happens above
4G. However it always reboots to BIOS because of a general protection
fault after the kernel is reloaded and decompressed above 4G. I noticed this
because Andy added 2 IDT entries, X86_TRAP_GP and X86_TRAP_PF.


---------------------------
1:                                                                                                                                               
        hlt
        jmp     1b
---------------------------
Then I inserted the above hlt instructions between asm code blocks in
arch/x86/kernel/head_64.S, and found that kernel decompression is done and
it boots into x86/kernel/head_64.S, then reboots after it executes the jmp
instruction in the code below, when I hard-coded the kernel decompression
location to be at 5G.

        /* Ensure I am executing from virtual addresses */
        movq    $1f, %rax      
        jmp     *%rax          
1:  
    
        /* Check if nx is implemented */                                                                                                         
        movl    $0x80000001, %eax       
        cpuid                  
        movl    %edx,%edi

Now I am blocked here since I am not familiar with the x86 architecture's
register setup.

Btw, to simplify the debugging, I made a patch to test how to make it
work when the kernel is relocated above 4G and decompressed there.


diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 6b1766c..74da678 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -123,7 +123,7 @@ ENTRY(startup_32)
 	/* Initialize Page tables to 0 */
 	leal	pgtable(%ebx), %edi
 	xorl	%eax, %eax
-	movl	$((4096*6)/4), %ecx
+	movl	$((4096*10)/4), %ecx
 	rep	stosl
 
 	/* Build Level 4 */
@@ -134,7 +134,7 @@ ENTRY(startup_32)
 	/* Build Level 3 */
 	leal	pgtable + 0x1000(%ebx), %edi
 	leal	0x1007(%edi), %eax
-	movl	$4, %ecx
+	movl	$8, %ecx
 1:	movl	%eax, 0x00(%edi)
 	addl	$0x00001000, %eax
 	addl	$8, %edi
@@ -144,7 +144,7 @@ ENTRY(startup_32)
 	/* Build Level 2 */
 	leal	pgtable + 0x2000(%ebx), %edi
 	movl	$0x00000183, %eax
-	movl	$2048, %ecx
+	movl	$4096, %ecx
 1:	movl	%eax, 0(%edi)
 	addl	$0x00200000, %eax
 	addl	$8, %edi
@@ -476,4 +476,4 @@ boot_stack_end:
 	.section ".pgtable","a",@nobits
 	.balign 4096
 pgtable:
-	.fill 6*4096, 1, 0
+	.fill 10*4096, 1, 0
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index a950864..47c8c80 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -404,6 +404,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
 	output = choose_kernel_location(input_data, input_len, output,
 					output_len > run_size ? output_len
 							      : run_size);
+	output = 0x140000000;
 
 	/* Validate memory location choices. */
 	if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1))
@@ -427,8 +428,10 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
 	 * 32-bit always performs relocations. 64-bit relocations are only
 	 * needed if kASLR has chosen a different load address.
 	 */
+#if 0
 	if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig)
 		handle_relocations(output, output_len);
+#endif
 	debug_putstr("done.\nBooting the kernel.\n");
 	return output;
 }


> 
> 
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ