Message-ID: <3e5651a1-2c3f-47db-e421-6f57b834ca7e@loongson.cn>
Date:   Fri, 10 Feb 2023 17:37:01 +0800
From:   Youling Tang <tangyouling@...ngson.cn>
To:     Huacai Chen <chenhuacai@...nel.org>
Cc:     Xi Ruoyao <xry111@...111.site>, Jinyang He <hejinyang@...ngson.cn>,
        Xuerui Wang <kernel@...0n.name>, loongarch@...ts.linux.dev,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH v4 5/5] LoongArch: Add support for kernel address space
 layout randomization (KASLR)



On 02/10/2023 05:06 PM, Youling Tang wrote:
>
>
> On 02/10/2023 04:47 PM, Youling Tang wrote:
>> This patch adds support for relocating the kernel to a random address.
>>
>> Entropy is derived from the kernel banner, which changes with every
>> build, and from random_get_entropy(), which should provide additional
>> runtime entropy.
>>
>> The kernel is relocated by up to RANDOMIZE_BASE_MAX_OFFSET bytes from
>> its link address. Because relocation happens so early in the kernel boot,
>> the amount of physical memory has not yet been determined. This means
>> the only way to limit relocation within the available memory is via
>> Kconfig. Limit the maximum value of RANDOMIZE_BASE_MAX_OFFSET to
>> 256MB (0x10000000) because our memory layout has many holes.
>>
>> Signed-off-by: Youling Tang <tangyouling@...ngson.cn>
>> Signed-off-by: Xi Ruoyao <xry111@...111.site> # Fix compiler warnings
>> ---
>>  arch/loongarch/Kconfig           |  23 +++++
>>  arch/loongarch/kernel/head.S     |  14 ++-
>>  arch/loongarch/kernel/relocate.c | 143 ++++++++++++++++++++++++++++++-
>>  3 files changed, 176 insertions(+), 4 deletions(-)
>>
>> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
>> index 089a4695b1b3..f0a070bd7254 100644
>> --- a/arch/loongarch/Kconfig
>> +++ b/arch/loongarch/Kconfig
>> @@ -489,6 +489,29 @@ config RELOCATABLE
>>        kernel binary at runtime to a different virtual address than the
>>        address it was linked at.
>>
>> +config RANDOMIZE_BASE
>> +    bool "Randomize the address of the kernel image (KASLR)"
>> +    depends on RELOCATABLE
>> +    help
>> +       Randomizes the physical and virtual address at which the
>> +       kernel image is loaded, as a security feature that
>> +       deters exploit attempts relying on knowledge of the location
>> +       of kernel internals.
>> +
>> +       The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET.
>> +
>> +       If unsure, say N.
>> +
>> +config RANDOMIZE_BASE_MAX_OFFSET
>> +    hex "Maximum KASLR offset" if EXPERT
>> +    depends on RANDOMIZE_BASE
>> +    range 0x0 0x10000000 if 64BIT
>> +    default "0x01000000"
>> +    help
>> +      When KASLR is active, this provides the maximum offset that will
>> +      be applied to the kernel image.
>> +
>> +
>>  config SECCOMP
>>      bool "Enable seccomp to safely compute untrusted bytecode"
>>      depends on PROC_FS
>> diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
>> index 499edc80d8ab..b12f459ad73a 100644
>> --- a/arch/loongarch/kernel/head.S
>> +++ b/arch/loongarch/kernel/head.S
>> @@ -87,10 +87,22 @@ SYM_CODE_START(kernel_entry)		# kernel entry point
>>      set_saved_sp    sp, t0, t1
>>
>>  #ifdef CONFIG_RELOCATABLE
>> +#ifdef CONFIG_RANDOMIZE_BASE
>> +    bl        do_kaslr
>> +
>> +    /* Repoint the sp into the new kernel image */
>> +    PTR_LI        sp, (_THREAD_SIZE - PT_SIZE)
>> +    PTR_ADD        sp, sp, tp
>> +    set_saved_sp    sp, t0, t1
>> +
>> +    /* do_kaslr returns the new kernel image entry point */
>> +    jr        a0
>> +    ASM_BUG()
>> +#else
>>      /* Apply the relocations */
>>      bl        relocate_kernel
>>  #endif
>> -
>> +#endif
>>      bl        start_kernel
>>      ASM_BUG()
>>
>> diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
>> index 91ce92433bab..5266f23a3006 100644
>> --- a/arch/loongarch/kernel/relocate.c
>> +++ b/arch/loongarch/kernel/relocate.c
>> @@ -9,19 +9,21 @@
>>  #include <linux/kernel.h>
>>  #include <linux/printk.h>
>>  #include <linux/panic_notifier.h>
>> +#include <linux/start_kernel.h>
>> +#include <asm/bootinfo.h>
>> +#include <asm/early_ioremap.h>
>>  #include <asm/sections.h>
>>
>>  #define RELOCATED(x) ((void *)((long)x + reloc_offset))
>> +#define RELOCATED_KASLR(x) ((void *)((long)x + offset))
>>
>>  extern long __rela_dyn_start;
>>  extern long __rela_dyn_end;
>>
>>  static unsigned long reloc_offset;
>>
>> -void __init relocate_kernel(void)
>> +static inline __init void relocate_relative(void)
>>  {
>> -    reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
>> -
>>      if (reloc_offset) {
>>          Elf64_Rela *rela, *rela_end;
>>          rela = (Elf64_Rela *)&__rela_dyn_start;
>> @@ -43,6 +45,141 @@ void __init relocate_kernel(void)
>>      }
>>  }
>>
>> +#ifdef CONFIG_RANDOMIZE_BASE
>> +static inline __init unsigned long rotate_xor(unsigned long hash,
>> +                          const void *area, size_t size)
>> +{
>> +    size_t i;
>> +    unsigned long *ptr = (unsigned long *)area;
>> +
>> +    for (i = 0; i < size / sizeof(hash); i++) {
>> +        /* Rotate by odd number of bits and XOR. */
>> +        hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
>> +        hash ^= ptr[i];
>> +    }
>> +
>> +    return hash;
>> +}
>> +
>> +static inline __init unsigned long get_random_boot(void)
>> +{
>> +    unsigned long entropy = random_get_entropy();
>> +    unsigned long hash = 0;
>> +
>> +    /* Attempt to create a simple but unpredictable starting entropy. */
>> +    hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
>> +
>> +    /* Add in any runtime entropy we can get */
>> +    hash = rotate_xor(hash, &entropy, sizeof(entropy));
>> +
>> +    return hash;
>> +}
>> +
>> +static inline __init bool kaslr_disabled(void)
>> +{
>> +    char *str;
>> +
>> +    str = strstr(boot_command_line, "nokaslr");
>> +    if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
>> +        return true;
>> +
>> +    return false;
>> +}
>> +
>> +/* Choose a new address for the kernel */
>> +static inline void __init *determine_relocation_address(void)
>> +{
>> +    unsigned long kernel_length;
>> +    void *dest = _text;
>> +    unsigned long offset;
>> +
>> +    if (kaslr_disabled())
>> +        return dest;
>> +
>> +    kernel_length = (long)_end - (long)_text;
>> +
>> +    offset = get_random_boot() << 16;
>> +    offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
>> +    if (offset < kernel_length)
>> +        offset += ALIGN(kernel_length, 0xffff);
>> +
>> +    return RELOCATED_KASLR(dest);
>> +}
>> +
>> +static inline int __init relocation_addr_valid(void *loc_new)
>> +{
>> +    if ((unsigned long)loc_new & 0x00000ffff) {
>> +        /* Inappropriately aligned new location */
>> +        return 0;
>> +    }
>> +    if ((unsigned long)loc_new < (unsigned long)_end) {
>> +        /* New location overlaps original kernel */
>> +        return 0;
>> +    }
>> +    return 1;
>> +}
>> +
>> +static inline void __init update_reloc_offset(unsigned long *addr, long offset)
>> +{
>> +    unsigned long *new_addr = (unsigned long *)RELOCATED_KASLR(addr);
>> +
>> +    *new_addr = (unsigned long)offset;
>> +}
>> +
>> +void *__init do_kaslr(void)
>> +{
>> +    void *loc_new;
>> +    unsigned long kernel_length;
>> +    long offset = 0;
>> +    /* Default to original kernel entry point */
>> +    void *kernel_entry = start_kernel;
>> +    char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE);
>> +
>> +    /* Boot command line was passed in fw_arg1 */
>> +    strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
>> +
>> +    kernel_length = (long)(_end) - (long)(_text);
>> +
>> +    loc_new = determine_relocation_address();
>> +
>> +    /* Sanity check relocation address */
>> +    if (relocation_addr_valid(loc_new))
>> +        offset = (unsigned long)loc_new - (unsigned long)(_text);
>> +
>> +    if (offset) {
>> +        /* Copy the kernel to its new location */
>> +        memcpy(loc_new, _text, kernel_length);
>> +
>> +        /* Sync the caches ready for execution of new kernel */
>> +        __asm__ __volatile__ (
>> +            "ibar 0 \t\n"
>> +            "dbar 0 \t\n");
>> +
>> +        reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
>> +        reloc_offset += offset;
>> +
>> +        relocate_relative();
>> +
>> +        /* The current thread is now within the relocated image */
>> +        __current_thread_info = RELOCATED_KASLR(__current_thread_info);
>> +
>> +        /* Return the new kernel's entry point */
>> +        kernel_entry = RELOCATED_KASLR(start_kernel);
>> +
>> +        update_reloc_offset(&reloc_offset, offset);
>> +    }
>
> Self review:
>
> There is a problem with the do_kaslr() implementation, which will be
> fixed in the next version.

When offset is 0 but reloc_offset is not 0 (i.e. KASLR applies no extra
offset, yet the bootloader loaded the kernel away from its link address),
relocate_relative() still needs to be executed.

It will be modified as follows (a consolidated sketch of the resulting
function follows the diff):

--- a/arch/loongarch/kernel/relocate.c
+++ b/arch/loongarch/kernel/relocate.c
@@ -146,6 +146,8 @@ void *__init do_kaslr(void)
         if (relocation_addr_valid(loc_new))
                 offset = (unsigned long)loc_new - (unsigned long)(_text);

+       reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
+
         if (offset) {
                /* Copy the kernel to its new location */
                 memcpy(loc_new, _text, kernel_length);
@@ -155,11 +157,8 @@ void *__init do_kaslr(void)
                         "ibar 0 \t\n"
                         "dbar 0 \t\n");

-               reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
                 reloc_offset += offset;

-               relocate_relative();
-
                 /* The current thread is now within the relocated image */
                 __current_thread_info = RELOCATED_KASLR(__current_thread_info);

@@ -169,6 +168,9 @@ void *__init do_kaslr(void)
                 update_reloc_offset(&reloc_offset, offset);
         }

+       if (reloc_offset)
+               relocate_relative();
+
         return kernel_entry;
  }
  #endif
@@ -177,7 +179,8 @@ void __init relocate_kernel(void)
  {
         reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;

-       relocate_relative();
+       if (reloc_offset)
+               relocate_relative();
  }
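
For reference, the resulting do_kaslr() after applying the diff above
would read roughly as follows (a sketch assembled from the hunks, not
the final version):

void *__init do_kaslr(void)
{
	void *loc_new;
	unsigned long kernel_length;
	long offset = 0;
	/* Default to the original kernel entry point */
	void *kernel_entry = start_kernel;
	char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE);

	/* The boot command line was passed in fw_arg1 */
	strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);

	kernel_length = (long)(_end) - (long)(_text);

	loc_new = determine_relocation_address();

	/* Sanity check the relocation address */
	if (relocation_addr_valid(loc_new))
		offset = (unsigned long)loc_new - (unsigned long)(_text);

	/* Load offset: nonzero if the bootloader moved the image */
	reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;

	if (offset) {
		/* Copy the kernel to its new location */
		memcpy(loc_new, _text, kernel_length);

		/* Sync the caches ready for execution of the new kernel */
		__asm__ __volatile__ (
			"ibar 0 \t\n"
			"dbar 0 \t\n");

		reloc_offset += offset;

		/* The current thread is now within the relocated image */
		__current_thread_info = RELOCATED_KASLR(__current_thread_info);

		/* Return the new kernel's entry point */
		kernel_entry = RELOCATED_KASLR(start_kernel);

		update_reloc_offset(&reloc_offset, offset);
	}

	/* Relocate whenever the image is away from its link address */
	if (reloc_offset)
		relocate_relative();

	return kernel_entry;
}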

>
>> +
>> +    return kernel_entry;
>> +}
>> +#endif
>> +
>> +void __init relocate_kernel(void)
>> +{
>> +    reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
>> +
>> +    relocate_relative();
>> +}
>> +
>>  /*
>>   * Show relocation information on panic.
>>   */
>>
>
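
By the way, the offset selection in determine_relocation_address() relies
on CONFIG_RANDOMIZE_BASE_MAX_OFFSET being a power of two, and the "<< 16"
keeps the candidate 64KB-aligned, matching the low-16-bit check in
relocation_addr_valid(). A standalone sketch of that arithmetic (the hash
and image size below are made-up sample values, not kernel code):

#include <stdio.h>

#define MAX_OFFSET	0x10000000UL	/* 256MB, must be a power of two */

int main(void)
{
	unsigned long hash = 0x123456789abcdef0UL; /* stand-in for get_random_boot() */
	unsigned long kernel_length = 0x01800000UL; /* stand-in image size: 24MB */
	unsigned long offset;

	offset = hash << 16;		/* keep the offset 64KB-aligned */
	offset &= (MAX_OFFSET - 1);	/* masking only bounds powers of two */
	if (offset < kernel_length)	/* step past the currently running image */
		offset += (kernel_length + 0xffffUL) & ~0xffffUL;

	printf("offset = 0x%lx\n", offset);	/* 0xef00000 for these inputs */
	return 0;
}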
