Message-ID: <CAGXu5jKhB=ueGkFmQTFw==ZKnsnEuBqRD_EnYpp6z7sB3e6YHA@mail.gmail.com>
Date: Wed, 15 Jun 2016 16:25:37 -0700
From: Kees Cook <keescook@...omium.org>
To: Yinghai Lu <yinghai@...nel.org>
Cc: Ingo Molnar <mingo@...hat.com>,
Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
"the arch/x86 maintainers" <x86@...nel.org>,
Baoquan He <bhe@...hat.com>, Borislav Petkov <bp@...e.de>,
Andy Lutomirski <luto@...nel.org>,
Thomas Garnier <thgarnie@...gle.com>
Subject: Re: [PATCH v10] x86/KASLR: Clarify identity map interface
On Wed, Jun 15, 2016 at 4:23 PM, Yinghai Lu <yinghai@...nel.org> wrote:
> On Wed, Jun 15, 2016 at 12:03 PM, Kees Cook <keescook@...omium.org> wrote:
>> index cfeb0259ed81..03a6f5d85a6b 100644
>> --- a/arch/x86/boot/compressed/kaslr.c
>> +++ b/arch/x86/boot/compressed/kaslr.c
>> @@ -485,6 +485,9 @@ unsigned char *choose_random_location(unsigned long input,
>>
>> boot_params->hdr.loadflags |= KASLR_FLAG;
>>
>> + /* Prepare to add new identity pagetables on demand. */
>> + initialize_identity_maps();
>> +
>> /* Record the various known unsafe memory ranges. */
>> mem_avoid_init(input, input_size, output);
>>
> ...
>>
>> - /* Make sure we have a top level page table ready to use. */
>> - if (!level4p)
>> - prepare_level4();
>> -
>> /* Align boundary to 2M. */
>> start = round_down(start, PMD_SIZE);
>> end = round_up(end, PMD_SIZE);
>
> It is good to avoid that check.
>
> BTW, can you continue to simplify mem_avoid_init()?
> Something like this:
Ah, that would be a nice improvement, yes. Let's pursue this after the
rest of the stack lands.
-Kees
>
> Index: linux-2.6/arch/x86/boot/compressed/kaslr.c
> ===================================================================
> --- linux-2.6.orig/arch/x86/boot/compressed/kaslr.c
> +++ linux-2.6/arch/x86/boot/compressed/kaslr.c
> @@ -122,16 +122,6 @@ struct mem_vector {
> unsigned long size;
> };
>
> -enum mem_avoid_index {
> - MEM_AVOID_ZO_RANGE = 0,
> - MEM_AVOID_INITRD,
> - MEM_AVOID_CMDLINE,
> - MEM_AVOID_BOOTPARAMS,
> - MEM_AVOID_MAX,
> -};
> -
> -static struct mem_vector mem_avoid[MEM_AVOID_MAX];
> -
> static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
> {
> /* Item at least partially before region. */
> @@ -154,6 +144,25 @@ static bool mem_overlaps(struct mem_vect
> return true;
> }
>
> +#define MEM_AVOID_MAX 4
> +static int avoid_count;
> +static struct mem_vector mem_avoid[MEM_AVOID_MAX];
> +static void mem_avoid_add_map(unsigned long start, unsigned long size,
> + int add_map)
> +{
> + if (avoid_count >= ARRAY_SIZE(mem_avoid)) {
> + warn("KASLR disabled: mem_avoid too small");
> + return;
> + }
> + mem_avoid[avoid_count].start = start;
> + mem_avoid[avoid_count].size = size;
> + if (add_map)
> + add_identity_map(start, size);
> +
> + avoid_count++;
> +}
> +
> /*
> * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
> * The mem_avoid array is used to store the ranges that need to be avoided
> @@ -240,19 +249,15 @@ static void mem_avoid_init(unsigned long
> * Avoid the region that is unsafe to overlap during
> * decompression.
> */
> - mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
> - mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
> - add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start,
> - mem_avoid[MEM_AVOID_ZO_RANGE].size);
> + mem_avoid_add_map(input, (output + init_size) - input, 1);
>
> /* Avoid initrd. */
> initrd_start = (u64)boot_params->ext_ramdisk_image << 32;
> initrd_start |= boot_params->hdr.ramdisk_image;
> initrd_size = (u64)boot_params->ext_ramdisk_size << 32;
> initrd_size |= boot_params->hdr.ramdisk_size;
> - mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
> - mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
> /* No need to set mapping for initrd, it will be handled in VO. */
> + mem_avoid_add_map(initrd_start, initrd_size, 0);
>
> /* Avoid kernel command line. */
> cmd_line = (u64)boot_params->ext_cmd_line_ptr << 32;
> @@ -261,16 +266,10 @@ static void mem_avoid_init(unsigned long
> ptr = (char *)(unsigned long)cmd_line;
> for (cmd_line_size = 0; ptr[cmd_line_size++]; )
> ;
> - mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
> - mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
> - add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start,
> - mem_avoid[MEM_AVOID_CMDLINE].size);
> + mem_avoid_add_map(cmd_line, cmd_line_size, 1);
>
> /* Avoid boot parameters. */
> - mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
> - mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
> - add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start,
> - mem_avoid[MEM_AVOID_BOOTPARAMS].size);
> + mem_avoid_add_map((unsigned long)boot_params, sizeof(*boot_params), 1);
>
> /* We don't need to set a mapping for setup_data. */
>
> @@ -292,7 +291,7 @@ static bool mem_avoid_overlap(struct mem
> unsigned long earliest = img->start + img->size;
> bool is_overlapping = false;
>
> - for (i = 0; i < MEM_AVOID_MAX; i++) {
> + for (i = 0; i < avoid_count; i++) {
> if (mem_overlaps(img, &mem_avoid[i]) &&
> mem_avoid[i].start < earliest) {
> *overlap = mem_avoid[i];
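
For reference, here is a minimal, self-contained sketch of the
registration pattern proposed above: a bounds-checked helper plus a
counter replaces the fixed enum of array indices. It mirrors the names
in the diff, but it is only an illustrative user-space model, not the
decompressor code; warn() and the example regions are stand-ins so it
compiles and runs on its own, and add_identity_map() is omitted.

#include <stdio.h>

#define MEM_AVOID_MAX 4

struct mem_vector {
	unsigned long start;
	unsigned long size;
};

static struct mem_vector mem_avoid[MEM_AVOID_MAX];
static int avoid_count;

/* Stand-in for the decompressor's warn(). */
static void warn(const char *msg)
{
	fprintf(stderr, "warning: %s\n", msg);
}

/* Register a region to avoid; entries beyond MEM_AVOID_MAX are dropped
 * with a warning instead of overflowing the array. */
static void mem_avoid_add(unsigned long start, unsigned long size)
{
	if (avoid_count >= MEM_AVOID_MAX) {
		warn("mem_avoid table full, entry dropped");
		return;
	}
	mem_avoid[avoid_count].start = start;
	mem_avoid[avoid_count].size = size;
	avoid_count++;
}

int main(void)
{
	int i;

	mem_avoid_add(0x100000, 0x800000);	/* e.g. ZO range */
	mem_avoid_add(0x2000000, 0x400000);	/* e.g. initrd   */

	/* Overlap checks now walk only the entries actually added. */
	for (i = 0; i < avoid_count; i++)
		printf("avoid: start=%#lx size=%#lx\n",
		       mem_avoid[i].start, mem_avoid[i].size);
	return 0;
}
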
--
Kees Cook
Chrome OS & Brillo Security