Message-ID: <CA+icZUVPzMnEe-VUabCCA_Kb9X00NZTUoms1Q0Wm6sK-5fHn=A@mail.gmail.com>
Date:   Wed, 15 Jul 2020 10:54:49 +0200
From:   Sedat Dilek <sedat.dilek@...il.com>
To:     Arvind Sankar <nivedita@...m.mit.edu>
Cc:     Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
        "H. Peter Anvin" <hpa@...or.com>, x86@...nel.org,
        Nick Desaulniers <ndesaulniers@...gle.com>,
        Fangrui Song <maskray@...gle.com>,
        Dmitry Golovin <dima@...ovin.in>,
        Clang-Built-Linux ML <clang-built-linux@...glegroups.com>,
        Ard Biesheuvel <ardb@...nel.org>,
        Masahiro Yamada <masahiroy@...nel.org>,
        Daniel Kiper <daniel.kiper@...cle.com>,
        Kees Cook <keescook@...omium.org>,
        Nathan Chancellor <natechancellor@...il.com>,
        Arnd Bergmann <arnd@...db.de>,
        "H . J . Lu" <hjl@...rceware.org>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v5 3/7] x86/boot/compressed: Get rid of GOT fixup code

On Wed, Jul 15, 2020 at 2:41 AM Arvind Sankar <nivedita@...m.mit.edu> wrote:
>
> From: Ard Biesheuvel <ardb@...nel.org>
>
> In a previous patch, we eliminated the GOT entries from the decompressor
> binary and added an assertion that the .got section is empty. This means
> that the GOT fixup routines that exist in both the 32-bit and 64-bit
> startup routines are now dead code and can be removed.
>
> While at it, drop the KEEP() from the linker script, as it has no effect
> on the contents of output sections that are created by the linker itself.
>
> Reviewed-by: Kees Cook <keescook@...omium.org>
> Signed-off-by: Ard Biesheuvel <ardb@...nel.org>
> Acked-by: Arvind Sankar <nivedita@...m.mit.edu>
> Signed-off-by: Arvind Sankar <nivedita@...m.mit.edu>
> Link: https://lore.kernel.org/r/20200523120021.34996-4-ardb@kernel.org

Tested-by: Sedat Dilek <sedat.dilek@...il.com>

- Sedat -
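
For context, the assertion the commit message refers to was added to
arch/x86/boot/compressed/vmlinux.lds.S earlier in the series; it looks
roughly like this (paraphrased, exact wording may differ):

        .got : {
                *(.got)
        }
        ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")

With the .got section guaranteed empty at link time, the runtime fixup
loops removed below have nothing left to do.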

> ---
>  arch/x86/boot/compressed/head_32.S     | 24 ++---------
>  arch/x86/boot/compressed/head_64.S     | 57 --------------------------
>  arch/x86/boot/compressed/vmlinux.lds.S |  4 +-
>  3 files changed, 5 insertions(+), 80 deletions(-)
>
> diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
> index 03557f2174bf..39f0bb43218f 100644
> --- a/arch/x86/boot/compressed/head_32.S
> +++ b/arch/x86/boot/compressed/head_32.S
> @@ -49,16 +49,13 @@
>   * Position Independent Executable (PIE) so that linker won't optimize
>   * R_386_GOT32X relocation to its fixed symbol address.  Older
>   * linkers generate R_386_32 relocations against locally defined symbols,
> - * _bss, _ebss, _got, _egot and _end, in PIE.  It isn't wrong, just less
> - * optimal than R_386_RELATIVE.  But the x86 kernel fails to properly handle
> - * R_386_32 relocations when relocating the kernel.  To generate
> - * R_386_RELATIVE relocations, we mark _bss, _ebss, _got, _egot and _end as
> - * hidden:
> + * _bss, _ebss and _end, in PIE.  It isn't wrong, just less optimal than
> + * R_386_RELATIVE.  But the x86 kernel fails to properly handle R_386_32
> + * relocations when relocating the kernel.  To generate R_386_RELATIVE
> + * relocations, we mark _bss, _ebss and _end as hidden:
>   */
>         .hidden _bss
>         .hidden _ebss
> -       .hidden _got
> -       .hidden _egot
>         .hidden _end
>
>         __HEAD
> @@ -192,19 +189,6 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
>         shrl    $2, %ecx
>         rep     stosl
>
> -/*
> - * Adjust our own GOT
> - */
> -       leal    _got(%ebx), %edx
> -       leal    _egot(%ebx), %ecx
> -1:
> -       cmpl    %ecx, %edx
> -       jae     2f
> -       addl    %ebx, (%edx)
> -       addl    $4, %edx
> -       jmp     1b
> -2:
> -
>  /*
>   * Do the extraction, and jump to the new kernel..
>   */
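
The deleted 32-bit loop above just slid every 4-byte GOT entry by the
load offset the asm kept in %ebx. A minimal C sketch of what it did
(function and variable names are mine, not the kernel's):

        #include <stdint.h>

        /* Sketch of the removed 32-bit fixup: add the load offset
         * (held in %ebx in the asm) to every 4-byte GOT entry. */
        static void fixup_got32(uint32_t *got, uint32_t *egot,
                                uint32_t load_offset)
        {
                uint32_t *p;

                for (p = got; p < egot; p++)
                        *p += load_offset;
        }
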
> diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
> index 97d37f0a34f5..bf1ab30acc5b 100644
> --- a/arch/x86/boot/compressed/head_64.S
> +++ b/arch/x86/boot/compressed/head_64.S
> @@ -40,8 +40,6 @@
>   */
>         .hidden _bss
>         .hidden _ebss
> -       .hidden _got
> -       .hidden _egot
>         .hidden _end
>
>         __HEAD
> @@ -353,25 +351,6 @@ SYM_CODE_START(startup_64)
>         /* Set up the stack */
>         leaq    boot_stack_end(%rbx), %rsp
>
> -       /*
> -        * paging_prepare() and cleanup_trampoline() below can have GOT
> -        * references. Adjust the table with address we are running at.
> -        *
> -        * Zero RAX for adjust_got: the GOT was not adjusted before;
> -        * there's no adjustment to undo.
> -        */
> -       xorq    %rax, %rax
> -
> -       /*
> -        * Calculate the address the binary is loaded at and use it as
> -        * a GOT adjustment.
> -        */
> -       call    1f
> -1:     popq    %rdi
> -       subq    $1b, %rdi
> -
> -       call    .Ladjust_got
> -
>         /*
>          * At this point we are in long mode with 4-level paging enabled,
>          * but we might want to enable 5-level paging or vice versa.
> @@ -464,21 +443,6 @@ trampoline_return:
>         pushq   $0
>         popfq
>
> -       /*
> -        * Previously we've adjusted the GOT with address the binary was
> -        * loaded at. Now we need to re-adjust for relocation address.
> -        *
> -        * Calculate the address the binary is loaded at, so that we can
> -        * undo the previous GOT adjustment.
> -        */
> -       call    1f
> -1:     popq    %rax
> -       subq    $1b, %rax
> -
> -       /* The new adjustment is the relocation address */
> -       movq    %rbx, %rdi
> -       call    .Ladjust_got
> -
>  /*
>   * Copy the compressed kernel to the end of our buffer
>   * where decompression in place becomes safe.
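
The two hunks above removed the callers of .Ladjust_got. The deleted
code computed the runtime load address with the usual call/pop trick
and adjusted the GOT twice: once right after entry (nothing to undo
yet, hence the zeroed RAX) and once more after settling on the final
relocation address in %rbx. A hedged sketch of that sequence, using an
adjust_got() helper sketched after the next hunk (names are mine):

        adjust_got(_got, _egot, /* undo */ 0,         /* apply */ load_addr);
        /* ... paging setup, possible trampoline, relocation ... */
        adjust_got(_got, _egot, /* undo */ load_addr, /* apply */ reloc_addr);
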
> @@ -556,27 +520,6 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
>         jmp     *%rax
>  SYM_FUNC_END(.Lrelocated)
>
> -/*
> - * Adjust the global offset table
> - *
> - * RAX is the previous adjustment of the table to undo (use 0 if it's the
> - * first time we touch GOT).
> - * RDI is the new adjustment to apply.
> - */
> -.Ladjust_got:
> -       /* Walk through the GOT adding the address to the entries */
> -       leaq    _got(%rip), %rdx
> -       leaq    _egot(%rip), %rcx
> -1:
> -       cmpq    %rcx, %rdx
> -       jae     2f
> -       subq    %rax, (%rdx)    /* Undo previous adjustment */
> -       addq    %rdi, (%rdx)    /* Apply the new adjustment */
> -       addq    $8, %rdx
> -       jmp     1b
> -2:
> -       ret
> -
>         .code32
>  /*
>   * This is the 32-bit trampoline that will be copied over to low memory.
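
And the helper itself: .Ladjust_got walked the table once per call,
subtracting the previous adjustment and adding the new one. A C
equivalent of the removed routine (a sketch; names and types are mine):

        #include <stdint.h>

        /* For each 8-byte GOT entry, undo the previous slide and
         * apply the new one. Called with old_adj == 0 the first time,
         * since no adjustment has been applied yet. */
        static void adjust_got(uint64_t *got, uint64_t *egot,
                               uint64_t old_adj, uint64_t new_adj)
        {
                uint64_t *p;

                for (p = got; p < egot; p++)
                        *p = *p - old_adj + new_adj;
        }
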
> diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
> index 4bcc943842ab..a4a4a59a2628 100644
> --- a/arch/x86/boot/compressed/vmlinux.lds.S
> +++ b/arch/x86/boot/compressed/vmlinux.lds.S
> @@ -43,9 +43,7 @@ SECTIONS
>                 _erodata = . ;
>         }
>         .got : {
> -               _got = .;
> -               KEEP(*(.got))
> -               _egot = .;
> +               *(.got)
>         }
>         .got.plt : {
>                 *(.got.plt)
> --
> 2.26.2
>
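
One note on the linker script hunk: KEEP() only protects matched input
sections from being discarded under --gc-sections; GOT entries are
synthesized by the linker itself, so KEEP(*(.got)) never had any effect
here and dropping it changes nothing. The plain *(.got) output rule is
kept so that any unexpected entry still lands in .got and trips the
SIZEOF(.got) == 0 assertion rather than going unnoticed (readelf -S on
the decompressor vmlinux should show a zero-size .got).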
