lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CAMj1kXEgMo0udCiup9hU9+C9X9Bq7EsSKvr2ddaWy6_4F1_eCg@mail.gmail.com>
Date: Sun, 18 Jan 2026 19:09:34 +0100
From: Ard Biesheuvel <ardb@...nel.org>
To: Brian Gerst <brgerst@...il.com>
Cc: linux-kernel@...r.kernel.org, x86@...nel.org, 
	Ingo Molnar <mingo@...nel.org>, "H . Peter Anvin" <hpa@...or.com>, Thomas Gleixner <tglx@...utronix.de>, 
	Borislav Petkov <bp@...en8.de>, "Rafael J. Wysocki" <rafael@...nel.org>
Subject: Re: [PATCH] x86/acpi: Remove indirect jump from wakeup_long64()

On Sun, 18 Jan 2026 at 18:05, Brian Gerst <brgerst@...il.com> wrote:
>
> wakeup_long64() is called from common_startup_64() via inital_code, so

initial_code

> it is already running on the normal virtual mapping.  There is no need
> to use an indirect jump since it is not switching mappings.
>

By the same reasoning (i.e., that everything executes in the kernel
virtual mapping) you might also convert the movq on line 55 into a
RIP-relative leaq, as it is the only remaining non-RIP relative access
in the file. That way, I can drop this file from my PIE series too.

But regardless of that,

Acked-by: Ard Biesheuvel <ardb@...nel.org>


> Remove the indirect jump by embedding wakeup_long64() as an inner label
> of do_suspend_lowlevel().  Remove saved_rip which is now unused.
>
> No functional change.
>
> Signed-off-by: Brian Gerst <brgerst@...il.com>
> Cc: Ard Biesheuvel <ardb@...nel.org>
> Cc: "Rafael J. Wysocki" <rafael@...nel.org>
> ---
>  arch/x86/kernel/acpi/wakeup_64.S | 64 +++++++++++++-------------------
>  1 file changed, 26 insertions(+), 38 deletions(-)
>
> diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
> index 04f561f75e99..a256cdd03ab5 100644
> --- a/arch/x86/kernel/acpi/wakeup_64.S
> +++ b/arch/x86/kernel/acpi/wakeup_64.S
> @@ -13,39 +13,6 @@
>  # Copyright 2003 Pavel Machek <pavel@...e.cz
>
>  .code64
> -       /*
> -        * Hooray, we are in Long 64-bit mode (but still running in low memory)
> -        */
> -SYM_FUNC_START(wakeup_long64)
> -       ANNOTATE_NOENDBR
> -       movq    saved_magic(%rip), %rax
> -       movq    $0x123456789abcdef0, %rdx
> -       cmpq    %rdx, %rax
> -       je      2f
> -
> -       /* stop here on a saved_magic mismatch */
> -       movq $0xbad6d61676963, %rcx
> -1:
> -       jmp 1b
> -2:
> -       movw    $__KERNEL_DS, %ax
> -       movw    %ax, %ss
> -       movw    %ax, %ds
> -       movw    %ax, %es
> -       movw    %ax, %fs
> -       movw    %ax, %gs
> -       movq    saved_rsp(%rip), %rsp
> -
> -       movq    saved_rbx(%rip), %rbx
> -       movq    saved_rdi(%rip), %rdi
> -       movq    saved_rsi(%rip), %rsi
> -       movq    saved_rbp(%rip), %rbp
> -
> -       movq    saved_rip(%rip), %rax
> -       ANNOTATE_RETPOLINE_SAFE
> -       jmp     *%rax
> -SYM_FUNC_END(wakeup_long64)
> -
>  SYM_FUNC_START(do_suspend_lowlevel)
>         FRAME_BEGIN
>         subq    $8, %rsp
> @@ -71,8 +38,6 @@ SYM_FUNC_START(do_suspend_lowlevel)
>         pushfq
>         popq    pt_regs_flags(%rax)
>
> -       movq    $.Lresume_point, saved_rip(%rip)
> -
>         movq    %rsp, saved_rsp(%rip)
>         movq    %rbp, saved_rbp(%rip)
>         movq    %rbx, saved_rbx(%rip)
> @@ -86,9 +51,27 @@ SYM_FUNC_START(do_suspend_lowlevel)
>         /* in case something went wrong, restore the machine status and go on */
>         jmp     .Lresume_point
>
> -       .align 4
> -.Lresume_point:
> +SYM_INNER_LABEL_ALIGN(wakeup_long64, SYM_L_GLOBAL)
>         ANNOTATE_NOENDBR
> +       movq    saved_magic(%rip), %rax
> +       movq    $0x123456789abcdef0, %rdx
> +       cmpq    %rdx, %rax
> +       jne     .Lbad_saved_magic
> +
> +       movw    $__KERNEL_DS, %ax
> +       movw    %ax, %ss
> +       movw    %ax, %ds
> +       movw    %ax, %es
> +       movw    %ax, %fs
> +       movw    %ax, %gs
> +       movq    saved_rsp(%rip), %rsp
> +
> +       movq    saved_rbx(%rip), %rbx
> +       movq    saved_rdi(%rip), %rdi
> +       movq    saved_rsi(%rip), %rsi
> +       movq    saved_rbp(%rip), %rbp
> +
> +.Lresume_point:
>         /* We don't restore %rax, it must be 0 anyway */
>         movq    $saved_context, %rax
>         movq    saved_context_cr4(%rax), %rbx
> @@ -130,6 +113,12 @@ SYM_FUNC_START(do_suspend_lowlevel)
>         addq    $8, %rsp
>         FRAME_END
>         jmp     restore_processor_state
> +
> +.Lbad_saved_magic:
> +       /* stop here on a saved_magic mismatch */
> +       movq $0xbad6d61676963, %rcx
> +1:
> +       jmp 1b
>  SYM_FUNC_END(do_suspend_lowlevel)
>  STACK_FRAME_NON_STANDARD do_suspend_lowlevel
>
> @@ -139,7 +128,6 @@ saved_rsi:          .quad   0
>  saved_rdi:             .quad   0
>  saved_rbx:             .quad   0
>
> -saved_rip:             .quad   0
>  saved_rsp:             .quad   0
>
>  SYM_DATA(saved_magic,  .quad   0)
>
> base-commit: 72249a0533c63e77e4bf56012b7b4f8fb3066317
> --
> 2.52.0
>

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ