lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Thu, 27 Nov 2008 11:39:01 +0100
From:	"Alexander van Heukelum" <heukelum@...tmail.fm>
To:	"Cyrill Gorcunov" <gorcunov@...il.com>,
	"Ingo Molnar" <mingo@...e.hu>,
	"Thomas Gleixner" <tglx@...utronix.de>,
	"H. Peter Anvin" <hpa@...or.com>
Cc:	linux-kernel@...r.kernel.org,
	"Cyrill Gorcunov" <gorcunov@...il.com>
Subject: Re: [PATCH 5/5] x86: entry_64.S - trivial: space, comments fixup


On Wed, 26 Nov 2008 22:17:04 +0300, gorcunov@...il.com said:
> From: Cyrill Gorcunov <gorcunov@...il.com>
> 
> Impact: cleanup
> 
> Signed-off-by: Cyrill Gorcunov <gorcunov@...il.com>

Acked-by: Alexander van Heukelum <heukelum@...tmail.fm>


>  arch/x86/kernel/entry_64.S |   92
>  ++++++++++++++++++++++---------------------
>  1 files changed, 47 insertions(+), 45 deletions(-)
> 
> diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
> index a21be86..5ccf410 100644
> --- a/arch/x86/kernel/entry_64.S
> +++ b/arch/x86/kernel/entry_64.S
> @@ -1119,8 +1119,8 @@ paranoidzeroentry machine_check do_machine_check
>  zeroentry simd_coprocessor_error do_simd_coprocessor_error
>  
>  	/*
> - 	 * "Paranoid" exit path from exception stack.
> -  	 * Paranoid because this is used by NMIs and cannot take
> +	 * "Paranoid" exit path from exception stack.
> +	 * Paranoid because this is used by NMIs and cannot take
>  	 * any kernel state for granted.
>  	 * We don't do kernel preemption checks here, because only
>  	 * NMI should be common and it does not enable IRQs and
> @@ -1225,7 +1225,7 @@ error_kernelspace:
>  	cmpq %rcx,RIP+8(%rsp)
>  	je error_swapgs
>  	cmpq $gs_change,RIP+8(%rsp)
> -        je error_swapgs
> +	je error_swapgs
>  	jmp error_sti
>  KPROBE_END(error_entry)
>  
> @@ -1249,36 +1249,36 @@ KPROBE_ENTRY(error_exit)
>  	CFI_ENDPROC
>  KPROBE_END(error_exit)
>  
> -       /* Reload gs selector with exception handling */
> -       /* edi:  new selector */
> +	/* Reload gs selector with exception handling */
> +	/* edi:  new selector */
>  ENTRY(native_load_gs_index)
>  	CFI_STARTPROC
>  	pushf
>  	CFI_ADJUST_CFA_OFFSET 8
>  	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
> -        SWAPGS
> +	SWAPGS
>  gs_change:
> -        movl %edi,%gs
> +	movl %edi,%gs
>  2:	mfence		/* workaround */
>  	SWAPGS
> -        popf
> +	popf
>  	CFI_ADJUST_CFA_OFFSET -8
> -        ret
> +	ret
>  	CFI_ENDPROC
>  END(native_load_gs_index)
>  
> -        .section __ex_table,"a"
> -        .align 8
> -        .quad gs_change,bad_gs
> -        .previous
> -        .section .fixup,"ax"
> +	.section __ex_table,"a"
> +	.align 8
> +	.quad gs_change,bad_gs
> +	.previous
> +	.section .fixup,"ax"
>  	/* running with kernelgs */
>  bad_gs:
>  	SWAPGS			/* switch back to user gs */
>  	xorl %eax,%eax
> -        movl %eax,%gs
> -        jmp  2b
> -        .previous
> +	movl %eax,%gs
> +	jmp  2b
> +	.previous
>  
>  /*
>   * Create a kernel thread.
> @@ -1313,7 +1313,7 @@ ENTRY(kernel_thread)
>  	 * so internally to the x86_64 port you can rely on kernel_thread()
>  	 * not to reschedule the child before returning, this avoids the need
>  	 * of hacks for example to fork off the per-CPU idle tasks.
> -         * [Hopefully no generic code relies on the reschedule -AK]
> +	 * [Hopefully no generic code relies on the reschedule -AK]
>  	 */
>  	RESTORE_ALL
>  	UNFAKE_STACK_FRAME
> @@ -1420,7 +1420,7 @@ nmi_schedule:
>  	CFI_ENDPROC
>  #else
>  	jmp paranoid_exit
> - 	CFI_ENDPROC
> +	CFI_ENDPROC
>  #endif
>  KPROBE_END(nmi)
>  
> @@ -1455,22 +1455,24 @@ KPROBE_END(ignore_sysret)
>  zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
>  
>  /*
> -# A note on the "critical region" in our callback handler.
> -# We want to avoid stacking callback handlers due to events occurring
> -# during handling of the last event. To do this, we keep events disabled
> -# until we've done all processing. HOWEVER, we must enable events before
> -# popping the stack frame (can't be done atomically) and so it would
> still
> -# be possible to get enough handler activations to overflow the stack.
> -# Although unlikely, bugs of that kind are hard to track down, so we'd
> -# like to avoid the possibility.
> -# So, on entry to the handler we detect whether we interrupted an
> -# existing activation in its critical region -- if so, we pop the
> current
> -# activation and restart the handler using the previous one.
> -*/
> + * A note on the "critical region" in our callback handler.
> + * We want to avoid stacking callback handlers due to events occurring
> + * during handling of the last event. To do this, we keep events
> disabled
> + * until we've done all processing. HOWEVER, we must enable events
> before
> + * popping the stack frame (can't be done atomically) and so it would
> still
> + * be possible to get enough handler activations to overflow the stack.
> + * Although unlikely, bugs of that kind are hard to track down, so we'd
> + * like to avoid the possibility.
> + * So, on entry to the handler we detect whether we interrupted an
> + * existing activation in its critical region -- if so, we pop the
> current
> + * activation and restart the handler using the previous one.
> + */
>  ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct
>  *pt_regs)
>  	CFI_STARTPROC
> -/* Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
> -   see the correct pointer to the pt_regs */
> +/*
> + * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
> + * see the correct pointer to the pt_regs
> + */
>  	movq %rdi, %rsp            # we don't return, adjust the stack frame
>  	CFI_ENDPROC
>  	DEFAULT_FRAME
> @@ -1488,18 +1490,18 @@ ENTRY(xen_do_hypervisor_callback)   #
> do_hypervisor_callback(struct *pt_regs)
>  END(do_hypervisor_callback)
>  
>  /*
> -# Hypervisor uses this for application faults while it executes.
> -# We get here for two reasons:
> -#  1. Fault while reloading DS, ES, FS or GS
> -#  2. Fault while executing IRET
> -# Category 1 we do not need to fix up as Xen has already reloaded all
> segment
> -# registers that could be reloaded and zeroed the others.
> -# Category 2 we fix up by killing the current process. We cannot use the
> -# normal Linux return path in this case because if we use the IRET
> hypercall
> -# to pop the stack frame we end up in an infinite loop of failsafe
> callbacks.
> -# We distinguish between categories by comparing each saved segment
> register
> -# with its current contents: any discrepancy means we in category 1.
> -*/
> + * Hypervisor uses this for application faults while it executes.
> + * We get here for two reasons:
> + *  1. Fault while reloading DS, ES, FS or GS
> + *  2. Fault while executing IRET
> + * Category 1 we do not need to fix up as Xen has already reloaded all
> segment
> + * registers that could be reloaded and zeroed the others.
> + * Category 2 we fix up by killing the current process. We cannot use
> the
> + * normal Linux return path in this case because if we use the IRET
> hypercall
> + * to pop the stack frame we end up in an infinite loop of failsafe
> callbacks.
> + * We distinguish between categories by comparing each saved segment
> register
> + * with its current contents: any discrepancy means we are in category 1.
> + */
>  ENTRY(xen_failsafe_callback)
>  	INTR_FRAME 1 (6*8)
>  	/*CFI_REL_OFFSET gs,GS*/
> -- 
> 1.6.0.4.603.gbc9c0
> 
-- 
  Alexander van Heukelum
  heukelum@...tmail.fm

-- 
http://www.fastmail.fm - The way an email service should be

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ