Message-ID: <20150702104956.GF4001@pd.tnic>
Date:	Thu, 2 Jul 2015 12:49:56 +0200
From:	Borislav Petkov <bp@...en8.de>
To:	Andy Lutomirski <luto@...nel.org>
Cc:	x86@...nel.org, linux-kernel@...r.kernel.org,
	Frédéric Weisbecker <fweisbec@...il.com>,
	Rik van Riel <riel@...hat.com>,
	Oleg Nesterov <oleg@...hat.com>,
	Denys Vlasenko <vda.linux@...glemail.com>,
	Kees Cook <keescook@...omium.org>,
	Brian Gerst <brgerst@...il.com>, paulmck@...ux.vnet.ibm.com
Subject: Re: [PATCH v4 11/17] x86/entry/64: Migrate 64-bit and compat
 syscalls to new exit hooks

On Mon, Jun 29, 2015 at 12:33:43PM -0700, Andy Lutomirski wrote:
> These need to be migrated together, as the compat case used to jump
> into the middle of the 64-bit exit code.
> 
> Signed-off-by: Andy Lutomirski <luto@...nel.org>
> ---
>  arch/x86/entry/entry_64.S        | 69 +++++-----------------------------------
>  arch/x86/entry/entry_64_compat.S |  7 ++--
>  2 files changed, 11 insertions(+), 65 deletions(-)
> 
> diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
> index cd9cbc62159c..9bc76766aa71 100644
> --- a/arch/x86/entry/entry_64.S
> +++ b/arch/x86/entry/entry_64.S
> @@ -229,6 +229,11 @@ entry_SYSCALL_64_fastpath:
>  	 */
>  	USERGS_SYSRET64
>  
> +GLOBAL(int_ret_from_sys_call_irqs_off)
> +	TRACE_IRQS_ON
> +	ENABLE_INTERRUPTS(CLBR_NONE)
> +	jmp int_ret_from_sys_call
> +
>  	/* Do syscall entry tracing */
>  tracesys:
>  	movq	%rsp, %rdi
> @@ -272,69 +277,11 @@ tracesys_phase2:
>   * Has correct iret frame.
>   */
>  GLOBAL(int_ret_from_sys_call)
> -	DISABLE_INTERRUPTS(CLBR_NONE)
> -int_ret_from_sys_call_irqs_off: /* jumps come here from the irqs-off SYSRET path */
> -	TRACE_IRQS_OFF
> -	movl	$_TIF_ALLWORK_MASK, %edi
> -	/* edi:	mask to check */
> -GLOBAL(int_with_check)
> -	LOCKDEP_SYS_EXIT_IRQ
> -	GET_THREAD_INFO(%rcx)
> -	movl	TI_flags(%rcx), %edx
> -	andl	%edi, %edx
> -	jnz	int_careful
> -	andl	$~TS_COMPAT, TI_status(%rcx)
> -	jmp	syscall_return
> -
> -	/*
> -	 * Either reschedule or signal or syscall exit tracking needed.
> -	 * First do a reschedule test.
> -	 * edx:	work, edi: workmask
> -	 */
> -int_careful:
> -	bt	$TIF_NEED_RESCHED, %edx
> -	jnc	int_very_careful
> -	TRACE_IRQS_ON
> -	ENABLE_INTERRUPTS(CLBR_NONE)
> -	pushq	%rdi
> -	SCHEDULE_USER
> -	popq	%rdi
> -	DISABLE_INTERRUPTS(CLBR_NONE)
> -	TRACE_IRQS_OFF
> -	jmp	int_with_check
> -
> -	/* handle signals and tracing -- both require a full pt_regs */
> -int_very_careful:
> -	TRACE_IRQS_ON
> -	ENABLE_INTERRUPTS(CLBR_NONE)
>  	SAVE_EXTRA_REGS
> -	/* Check for syscall exit trace */
> -	testl	$_TIF_WORK_SYSCALL_EXIT, %edx
> -	jz	int_signal
> -	pushq	%rdi
> -	leaq	8(%rsp), %rdi			/* &ptregs -> arg1 */
> -	call	syscall_trace_leave
> -	popq	%rdi
> -	andl	$~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU), %edi
> -	jmp	int_restore_rest
> -
> -int_signal:
> -	testl	$_TIF_DO_NOTIFY_MASK, %edx
> -	jz	1f
> -	movq	%rsp, %rdi			/* &ptregs -> arg1 */
> -	xorl	%esi, %esi			/* oldset -> arg2 */
> -	call	do_notify_resume
> -1:	movl	$_TIF_WORK_MASK, %edi
> -int_restore_rest:
> +	movq	%rsp, %rdi
> +	call	syscall_return_slowpath	/* returns with IRQs disabled */
>  	RESTORE_EXTRA_REGS
> -	DISABLE_INTERRUPTS(CLBR_NONE)
> -	TRACE_IRQS_OFF
> -	jmp	int_with_check
> -
> -syscall_return:
> -	/* The IRETQ could re-enable interrupts: */
> -	DISABLE_INTERRUPTS(CLBR_ANY)
> -	TRACE_IRQS_IRETQ
> +	TRACE_IRQS_IRETQ		/* we're about to change IF */
>  
>  	/*
>  	 * Try to use SYSRET instead of IRET if we're returning to

Hallelujah!

/me luvz hunks which remove a bunch of asm :)
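
For readers following along: AFAICT the asm that just went away was an
open-coded exit-work loop -- check the TIF work flags with IRQs off and,
if anything is pending, reenable IRQs, handle reschedule / syscall-exit
tracing / signals, then go back and re-check. Roughly the shape below in
C. This is only a sketch of what moved into syscall_return_slowpath();
trace_syscall_exit() and deliver_signals() are made-up placeholder names
standing in for the syscall_trace_leave and do_notify_resume calls the
old asm made.

#include <linux/compiler.h>	/* READ_ONCE() */
#include <linux/irqflags.h>	/* local_irq_enable()/local_irq_disable() */
#include <linux/ptrace.h>	/* struct pt_regs */
#include <linux/sched.h>	/* schedule() */
#include <linux/thread_info.h>	/* current_thread_info(), _TIF_* masks */

/* Placeholders for the calls the old asm made -- not the real prototypes. */
static void trace_syscall_exit(struct pt_regs *regs);
static void deliver_signals(struct pt_regs *regs);

/* Sketch of the removed int_with_check/int_careful/int_very_careful loop. */
static void exit_work_loop_sketch(struct pt_regs *regs)
{
	u32 mask = _TIF_ALLWORK_MASK;			/* %edi in the old asm */

	for (;;) {
		u32 flags = READ_ONCE(current_thread_info()->flags);

		if (!(flags & mask))
			return;				/* nothing pending, return to user */

		local_irq_enable();			/* int_careful / int_very_careful */

		if (flags & _TIF_NEED_RESCHED) {
			schedule();			/* SCHEDULE_USER */
		} else if (flags & _TIF_WORK_SYSCALL_EXIT) {
			trace_syscall_exit(regs);	/* the syscall_trace_leave call */
			mask &= ~(_TIF_WORK_SYSCALL_EXIT | _TIF_SYSCALL_EMU);
		} else {
			if (flags & _TIF_DO_NOTIFY_MASK)
				deliver_signals(regs);	/* the do_notify_resume call */
			mask = _TIF_WORK_MASK;
		}

		local_irq_disable();			/* re-check with IRQs off */
	}
}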

> diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
> index efe0b1e499fa..ac0658142ae1 100644
> --- a/arch/x86/entry/entry_64_compat.S
> +++ b/arch/x86/entry/entry_64_compat.S
> @@ -209,10 +209,10 @@ sysexit_from_sys_call:
>  	.endm
>  
>  	.macro auditsys_exit exit
> -	testl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
> -	jnz	ia32_ret_from_sys_call
>  	TRACE_IRQS_ON
>  	ENABLE_INTERRUPTS(CLBR_NONE)
> +	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
> +	jnz ia32_ret_from_sys_call

I guess you want to use tabs here like the rest of the macro does.

>  	movl	%eax, %esi		/* second arg, syscall return value */
>  	cmpl	$-MAX_ERRNO, %eax	/* is it an error ? */
>  	jbe	1f
> @@ -227,11 +227,10 @@ sysexit_from_sys_call:
>  	testl	%edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
>  	jz	\exit
>  	xorl	%eax, %eax		/* Do not leak kernel information */
> -	movq	%rax, R11(%rsp)

I guess that change needs at least some explanation in the commit
message. AFAIU, this is RIP we shouldn't be zeroing, since we need it in
int_ret_from_sys_call...

>  	movq	%rax, R10(%rsp)
>  	movq	%rax, R9(%rsp)
>  	movq	%rax, R8(%rsp)
> -	jmp	int_with_check
> +	jmp	int_ret_from_sys_call_irqs_off
>  	.endm
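
To spell out what that xorl/movq sequence is doing in pt_regs terms
(sketch only, using the usual struct pt_regs field names): those slots
get zeroed so that kernel values don't leak back to userspace, per the
"Do not leak kernel information" comment, and with this patch the r11
slot is left alone:

	regs->r10 = 0;		/* "Do not leak kernel information" */
	regs->r9  = 0;
	regs->r8  = 0;
	/* regs->r11 = 0;	   the line this patch drops -- see above */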
>  
>  sysenter_auditsys:
> -- 
> 2.4.3
> 
> 

-- 
Regards/Gruss,
    Boris.

ECO tip #101: Trim your mails when you reply.
