Message-ID: <81d91a68-baf6-06b1-3352-73bea05a2738@citrix.com>
Date:   Fri, 14 Oct 2016 19:38:48 +0100
From:   Andrew Cooper <andrew.cooper3@...rix.com>
To:     Boris Ostrovsky <boris.ostrovsky@...cle.com>,
        <david.vrabel@...rix.com>, <JGross@...e.com>
CC:     Matt Fleming <matt@...eblueprint.co.uk>,
        <xen-devel@...ts.xenproject.org>, <linux-kernel@...r.kernel.org>,
        <roger.pau@...rix.com>
Subject: Re: [Xen-devel] [PATCH 4/8] xen/pvh: Bootstrap PVH guest

On 14/10/16 19:05, Boris Ostrovsky wrote:
> diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S
> new file mode 100644
> index 0000000..58c477b
> --- /dev/null
> +++ b/arch/x86/xen/xen-pvh.S
> @@ -0,0 +1,143 @@
> +/*
> + * Copyright C 2016, Oracle and/or its affiliates. All rights reserved.
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; either version 2 of the License, or
> + * (at your option) any later version.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License along
> + * with this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +	.code32
> +	.text
> +#define _pa(x)          ((x) - __START_KERNEL_map)
> +
> +#include <linux/elfnote.h>
> +#include <linux/init.h>
> +#include <linux/linkage.h>
> +#include <asm/segment.h>
> +#include <asm/asm.h>
> +#include <asm/boot.h>
> +#include <asm/processor-flags.h>
> +#include <asm/msr.h>
> +#include <xen/interface/elfnote.h>
> +
> +	__HEAD
> +	.code32

Duplicated .code32

> +
> +/* Entry point for PVH guests */
> +ENTRY(pvh_start_xen)
> +	cli
> +	cld

The ABI states that these will be clear.

> +
> +	mov $_pa(gdt), %eax
> +	lgdt (%eax)

I am fairly sure you can express this without an intermediate in %eax.
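Something like this (untested) ought to do it, using the gdt symbol and
_pa() macro already in the patch:

	lgdt (_pa(gdt))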

> +
> +	movl $(__BOOT_DS),%eax
> +	movl %eax,%ds
> +	movl %eax,%es
> +	movl %eax,%ss
> +
> +	/* Stash hvm_start_info */
> +	mov $_pa(pvh_start_info), %edi
> +	mov %ebx, %esi
> +	mov $_pa(pvh_start_info_sz), %ecx
> +	mov (%ecx), %ecx

No need for an intermediate.
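e.g. something like this (untested):

	mov _pa(pvh_start_info_sz), %ecx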

> +	rep
> +	movsb

Surely we can guarantee the size is a multiple of 4?  movsl would be better.
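If so, once the count is in %ecx, something like this (untested) would do:

	shr $2, %ecx		/* byte count -> dword count */
	rep movsl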

> +
> +	movl $_pa(early_stack_end), %eax
> +	movl %eax, %esp

You can mov straight into %esp.
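i.e. (untested):

	mov $_pa(early_stack_end), %esp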

> +
> +	/* Enable PAE mode */
> +	movl %cr4, %eax
> +	orl $X86_CR4_PAE, %eax
> +	movl %eax, %cr4
> +
> +#ifdef CONFIG_X86_64
> +	/* Enable Long mode */
> +	movl $MSR_EFER, %ecx
> +	rdmsr
> +	btsl $_EFER_LME, %eax
> +	wrmsr
> +
> +	/* Enable pre-constructed page tables */
> +	mov $_pa(init_level4_pgt), %eax
> +	movl %eax, %cr3
> +	movl $(X86_CR0_PG | X86_CR0_PE), %eax
> +	movl %eax, %cr0
> +
> +	/* Jump to 64-bit mode. */
> +	pushl $__KERNEL_CS
> +	leal _pa(1f), %eax
> +	pushl %eax
> +	lret

You are still in compat mode, so you can use ljmp $__KERNEL_CS, $_pa(1f)
instead of the push/push/lret sequence.

> +
> +	/* 64-bit entry point */
> +	.code64
> +1:
> +	call xen_prepare_pvh
> +
> +	/* startup_64 expects boot_params in %rsi */
> +	mov $_pa(pvh_bootparams), %rsi
> +	movq $_pa(startup_64), %rax

You seem to have an inconsistent mix of explicit operand-size suffixes in
places where they aren't required.

> +	jmp *%rax
> +
> +#else /* CONFIG_X86_64 */
> +
> +	call setup_pgtable_32
> +
> +	mov $_pa(initial_page_table), %eax
> +	movl %eax, %cr3
> +
> +	movl %cr0, %eax
> +	orl $(X86_CR0_PG | X86_CR0_PE), %eax
> +	movl %eax, %cr0
> +
> +	ljmp $__BOOT_CS,$1f
> +1:
> +	call xen_prepare_pvh

Why does xen_prepare_pvh need paging?  I can't spot anything which
should need it, and it feels conceptually wrong.

~Andrew

> +	mov $_pa(pvh_bootparams), %esi
> +
> +	/* startup_32 doesn't expect paging and PAE to be on */
> +	ljmp $__BOOT_CS,$_pa(2f)
> +2:
> +	movl %cr0, %eax
> +	andl $~X86_CR0_PG, %eax
> +	movl %eax, %cr0
> +	movl %cr4, %eax
> +	andl $~X86_CR4_PAE, %eax
> +	movl %eax, %cr4
> +
> +	ljmp    $0x10, $_pa(startup_32)
> +#endif
> +
> +	.data
> +gdt:
> +	.word	gdt_end - gdt
> +	.long	_pa(gdt)
> +	.word	0
> +	.quad	0x0000000000000000 /* NULL descriptor */
> +#ifdef CONFIG_X86_64
> +	.quad	0x00af9a000000ffff /* __KERNEL_CS */
> +#else
> +	.quad	0x00cf9a000000ffff /* __KERNEL_CS */
> +#endif
> +	.quad	0x00cf92000000ffff /* __KERNEL_DS */
> +gdt_end:
> +
> +	.bss
> +	.balign 4
> +early_stack:
> +	.fill 16, 1, 0
> +early_stack_end:
> +
> +	ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
> +	             _ASM_PTR (pvh_start_xen - __START_KERNEL_map))
>
