Date:	Tue, 24 May 2011 22:05:30 -0400
From:	Dan Rosenberg <drosenberg@...curity.com>
To:	Tony Luck <tony.luck@...il.com>
Cc:	linux-kernel@...r.kernel.org, davej@...hat.com,
	kees.cook@...onical.com, davem@...emloft.net, eranian@...gle.com,
	torvalds@...ux-foundation.org, adobriyan@...il.com,
	penberg@...nel.org, hpa@...or.com,
	Arjan van de Ven <arjan@...radead.org>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Valdis.Kletnieks@...edu, Ingo Molnar <mingo@...e.hu>,
	pageexec@...email.hu
Subject: Re: [RFC][PATCH] Randomize kernel base address on boot

On Tue, 2011-05-24 at 19:08 -0400, Dan Rosenberg wrote:
> On Tue, 2011-05-24 at 16:31 -0400, Dan Rosenberg wrote:
> > This introduces CONFIG_RANDOMIZE_BASE, which randomizes the address at
> > which the kernel is decompressed at boot.  It is a security feature
> > intended to frustrate exploits that rely on knowing where kernel
> > internals are located in memory.  When the option is enabled, the
> > kptr_restrict and dmesg_restrict sysctls default to 1, since hiding
> > kernel pointers is necessary to keep the randomized base address
> > secret.
> 
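As a side note, the sysctl coupling described above can be pictured with a
small C sketch.  This is purely illustrative and not part of the posted
patch; the #ifdef wiring is an assumption, though kptr_restrict and
dmesg_restrict are the real sysctl variables:

/* Illustrative sketch only (not from the patch): make the existing
 * kptr_restrict and dmesg_restrict sysctls default to 1 whenever the new
 * option is enabled, so %pK pointers and dmesg are hidden from
 * unprivileged users and the base address stays secret. */
#ifdef CONFIG_RANDOMIZE_BASE
int kptr_restrict = 1;
int dmesg_restrict = 1;
#else
int kptr_restrict;
int dmesg_restrict;
#endif
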
> > diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
> > index 67a655a..2680db0 100644
> > --- a/arch/x86/boot/compressed/head_32.S
> > +++ b/arch/x86/boot/compressed/head_32.S
> > @@ -69,12 +69,75 @@ ENTRY(startup_32)
> >   */
> >  
> >  #ifdef CONFIG_RELOCATABLE
> > +#ifdef CONFIG_RANDOMIZE_BASE
> > +
> > +	/* Standard check for cpuid */
> > +	pushfl
> > +	popl	%eax
> > +	movl	%eax, %ebx
> > +	xorl	$0x200000, %eax
> > +	pushl	%eax
> > +	popfl
> > +	pushfl
> > +	popl	%eax
> > +	cmpl	%eax, %ebx
> > +	jz	4f
> > +
> > +	/* Check for cpuid 1 */
> > +	movl	$0x0, %eax
> > +	cpuid
> > +	cmpl	$0x1, %eax
> > +	jb	4f
> > +
> > +	movl	$0x1, %eax
> > +	cpuid
> > +	xor	%eax, %eax
> > +
> > +	/* RDRAND is bit 30 */
> > +	testl	$0x40000000, %ecx
> > +	jnz	1f
> > +
> > +	/* RDTSC is bit 4 */
> > +	testl	$0x10, %edx
> > +	jnz	3f
> > +
> > +	/* Nothing is supported */
> > +	jmp	4f
> > +1:
> > +	/* RDRAND sets carry bit on success, otherwise we should try
> > +	 * again. */
> > +	movl	$0x10, %ecx
> > +2:
> > +	/* rdrand %eax */
> > +	.byte	0x0f, 0xc7, 0xf0
> > +	jc	4f
> > +	loop	2b
> > +
> > +	/* Fall through: if RDRAND is supported but fails, use RDTSC,
> > +	 * which is guaranteed to be supported. */
> > +3:
> > +	rdtsc
> > +	shll	$0xc, %eax
> > +4:
> > +	/* Maximum offset at 64mb to be safe */
> > +	andl	$0x3ffffff, %eax
> > +	movl	%ebp, %ebx
> > +	addl	%eax, %ebx
> > +#else
> >  	movl	%ebp, %ebx
> > +#endif
> >  	movl	BP_kernel_alignment(%esi), %eax
> >  	decl	%eax
> >  	addl    %eax, %ebx
> >  	notl	%eax
> >  	andl    %eax, %ebx
> > +
> > +	/* LOAD_PHYSICAL_ADDR is the minimum safe address we can
> > +	 * decompress at. */
> > +	cmpl	$LOAD_PHYSICAL_ADDR, %ebx
> > +	jae	1f
> > +	movl	$LOAD_PHYSICAL_ADDR, %ebx
> > +1:
> >  #else
> >  	movl	$LOAD_PHYSICAL_ADDR, %ebx
> >  #endif
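
For readers less fluent in the assembly, here is a rough user-space C
rendering of what the 32-bit stub above computes.  It is a sketch only, not
code from the patch: the function and helper names are invented, the load
address and the LOAD_PHYSICAL_ADDR/alignment values are assumed defaults,
and a real boot stub obviously cannot use libc.  Only the flow (RDRAND if
available, TSC otherwise, offset capped at 64 MB, rounded up to the kernel
alignment, never below LOAD_PHYSICAL_ADDR) mirrors the code above:

/* Illustrative sketch of the randomization flow above; not kernel code. */
#include <stdio.h>
#include <cpuid.h>		/* __get_cpuid(), GCC/clang on x86 */

#define LOAD_PHYSICAL_ADDR	0x1000000UL	/* 16 MB, the usual default */
#define OFFSET_MASK		0x3ffffffUL	/* cap the random offset at 64 MB */

static int rdrand32(unsigned int *out)
{
	unsigned char ok;

	/* RDRAND sets CF on success and clears it on failure. */
	__asm__ volatile("rdrand %0; setc %1" : "=r"(*out), "=qm"(ok));
	return ok;
}

static unsigned int rdtsc_lo(void)
{
	unsigned int lo, hi;

	__asm__ volatile("rdtsc" : "=a"(lo), "=d"(hi));
	return lo;
}

static unsigned long choose_base(unsigned long load_addr, unsigned long align)
{
	unsigned int eax, ebx, ecx = 0, edx = 0, rand = 0;
	unsigned long base;

	/* __get_cpuid() also performs the EFLAGS ID-bit check for us. */
	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
		if (ecx & (1u << 30)) {		/* RDRAND: CPUID.1:ECX bit 30 */
			int i, ok = 0;

			for (i = 0; i < 16 && !ok; i++)	/* bounded retries */
				ok = rdrand32(&rand);
			if (!ok)			/* RDRAND kept failing */
				rand = rdtsc_lo() << 12;
		} else if (edx & (1u << 4)) {	/* TSC: CPUID.1:EDX bit 4 */
			rand = rdtsc_lo() << 12;
		}
		/* else: no entropy source, rand stays 0 */
	}

	/* Cap the offset at 64 MB and round up to the kernel alignment... */
	base = (load_addr + (rand & OFFSET_MASK) + align - 1) & ~(align - 1);

	/* ...but never decompress below LOAD_PHYSICAL_ADDR. */
	if (base < LOAD_PHYSICAL_ADDR)
		base = LOAD_PHYSICAL_ADDR;
	return base;
}

int main(void)
{
	/* Assume the bootloader put us at 1 MB with 16 MB alignment. */
	printf("base = 0x%lx\n", choose_base(0x100000UL, 0x1000000UL));
	return 0;
}
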
> > diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
> > index 35af09d..6a05219 100644
> > --- a/arch/x86/boot/compressed/head_64.S
> > +++ b/arch/x86/boot/compressed/head_64.S
> > @@ -90,6 +90,13 @@ ENTRY(startup_32)
> >  	addl	%eax, %ebx
> >  	notl	%eax
> >  	andl	%eax, %ebx
> > +
> > +	/* LOAD_PHYSICAL_ADDR is the minimum safe address we can
> > +	 * decompress at. */
> > +	cmpl	$LOAD_PHYSICAL_ADDR, %ebx
> > +	jae	1f
> > +	movl	$LOAD_PHYSICAL_ADDR, %ebx
> > +1:
> >  #else
> >  	movl	$LOAD_PHYSICAL_ADDR, %ebx
> >  #endif
> > @@ -191,7 +198,7 @@ no_longmode:
> >  	 * it may change in the future.
> >  	 */
> >  	.code64
> > -	.org 0x200
> > +	.org 0x300
> >  ENTRY(startup_64)
> >  	/*
> >  	 * We come here either from startup_32 or directly from a
> > @@ -232,6 +239,13 @@ ENTRY(startup_64)
> >  	addq	%rax, %rbp
> >  	notq	%rax
> >  	andq	%rax, %rbp
> > +
> > +	/* LOAD_PHYSICAL_ADDR is the minimum safe address we can
> > +	 * decompress at. */
> > +	cmpq	$LOAD_PHYSICAL_ADDR, %rbp
> > +	jae	1f
> > +	movq	$LOAD_PHYSICAL_ADDR, %rbp
> > +1:
> >  #else
> >  	movq	$LOAD_PHYSICAL_ADDR, %rbp
> >  #endif
> 
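The decl/notl/andl sequences (and their 64-bit counterparts) in these hunks
are the usual power-of-two align-up idiom.  A quick worked example, with
values picked only for illustration and assuming the default 16 MB
alignment:

/* Round x up to the next multiple of align; align must be a power of two. */
unsigned long align_up(unsigned long x, unsigned long align)
{
	return (x + align - 1) & ~(align - 1);
}

/* e.g. align_up(0x2345678, 0x1000000) == 0x3000000: adding align-1
 * (0xffffff) gives 0x3345677, and masking off the low 24 bits rounds the
 * address up to the next 16 MB boundary. */
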
> Thanks to Kees Cook for noticing that I didn't clear %eax before jumping
> to the "nothing supported" label (4).  That path would have just used
> the leftover flags value as "randomness", which is still wrong; I'll fix
> it.  The next version will fall back to using the BIOS signature instead
> anyway.
> 

Also, thanks to someone who prefers to remain nameless for pointing out
that this logic also results in the kernel being loaded at exactly
LOAD_PHYSICAL_ADDR about one time in four, because any result below that
address gets rounded up to it.  This will be fixed as well.
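
As a rough back-of-the-envelope check of the "one in four" figure, the
short program below counts how often the final base lands exactly on
LOAD_PHYSICAL_ADDR.  The 1 MB load address and the 16 MB values for
LOAD_PHYSICAL_ADDR and the alignment are assumptions (the common defaults),
not numbers taken from this thread:

/* Fraction of 4 KB-stepped offsets in [0, 64 MB) whose final base, after
 * the align-up and the LOAD_PHYSICAL_ADDR floor, is exactly
 * LOAD_PHYSICAL_ADDR.  Illustration only. */
#include <stdio.h>

int main(void)
{
	const unsigned long load = 0x100000;	/* assume loaded at 1 MB */
	const unsigned long align = 0x1000000;	/* 16 MB alignment */
	const unsigned long floor = 0x1000000;	/* LOAD_PHYSICAL_ADDR */
	unsigned long at_floor = 0, total = 0;

	for (unsigned long r = 0; r < 0x4000000; r += 0x1000) {
		unsigned long base = (load + r + align - 1) & ~(align - 1);

		if (base < floor)
			base = floor;
		at_floor += (base == floor);
		total++;
	}
	printf("%.1f%% of offsets end up at LOAD_PHYSICAL_ADDR\n",
	       100.0 * at_floor / total);
	return 0;
}
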

-Dan

