lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Wed, 10 Dec 2014 10:36:39 -0800
From:	Andy Lutomirski <luto@...capital.net>
To:	Shaohua Li <shli@...com>
Cc:	"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
	X86 ML <x86@...nel.org>, kernel-team@...com,
	"H. Peter Anvin" <hpa@...or.com>, Ingo Molnar <mingo@...hat.com>
Subject: Re: [PATCH 1/3] X86: make VDSO data support multiple pages

On Sun, Dec 7, 2014 at 7:03 PM, Shaohua Li <shli@...com> wrote:
> Currently vdso data is one page. Next patches will add per-cpu data to
> vdso, which requires several pages if CPU number is big. This makes VDSO
> data support multiple pages.

Can you rename __vvar_page to __vvar_pages?

>
> Cc: Andy Lutomirski <luto@...capital.net>
> Cc: H. Peter Anvin <hpa@...or.com>
> Cc: Ingo Molnar <mingo@...hat.com>
> Signed-off-by: Shaohua Li <shli@...com>
> ---
>  arch/x86/include/asm/vvar.h     | 6 +++++-
>  arch/x86/kernel/asm-offsets.c   | 5 +++++
>  arch/x86/kernel/vmlinux.lds.S   | 4 +---
>  arch/x86/vdso/vdso-layout.lds.S | 5 +++--
>  arch/x86/vdso/vma.c             | 3 ++-
>  5 files changed, 16 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/include/asm/vvar.h b/arch/x86/include/asm/vvar.h
> index 5d2b9ad..fcbe621 100644
> --- a/arch/x86/include/asm/vvar.h
> +++ b/arch/x86/include/asm/vvar.h
> @@ -47,7 +47,11 @@ extern char __vvar_page;
>  DECLARE_VVAR(0, volatile unsigned long, jiffies)
>  DECLARE_VVAR(16, int, vgetcpu_mode)
>  DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data)
> -
> +/*
> + * You must update VVAR_TOTAL_SIZE to reflect all of the variables we're
> + * stuffing into the vvar area.  Don't change any of the above without
> + * also updating the math for VVAR_TOTAL_SIZE accordingly.
> + */
>  #undef DECLARE_VVAR
>
>  #endif
> diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
> index 9f6b934..0ab31a9 100644
> --- a/arch/x86/kernel/asm-offsets.c
> +++ b/arch/x86/kernel/asm-offsets.c
> @@ -16,6 +16,7 @@
>  #include <asm/sigframe.h>
>  #include <asm/bootparam.h>
>  #include <asm/suspend.h>
> +#include <asm/vgtod.h>
>
>  #ifdef CONFIG_XEN
>  #include <xen/interface/xen.h>
> @@ -71,4 +72,8 @@ void common(void) {
>
>         BLANK();
>         DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
> +
> +       BLANK();
> +       DEFINE(VVAR_TOTAL_SIZE,
> +               ALIGN(128 + sizeof(struct vsyscall_gtod_data), PAGE_SIZE));

Perhaps add:

BUILD_BUG_ON(VVAR_TOTAL_SIZE % PAGE_SIZE != 0);

or just keep the alignment stuff that you removed.

Although, TBH, this is still rather ugly IMO.  Maybe we should just
have struct vvar somewhere and make everything use it.  We couldn't do
that before because of jiffies and such, but those are all gone now.

--Andy

>  }
> diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
> index 49edf2d..8b11307 100644
> --- a/arch/x86/kernel/vmlinux.lds.S
> +++ b/arch/x86/kernel/vmlinux.lds.S
> @@ -168,11 +168,9 @@ SECTIONS
>                  * Pad the rest of the page with zeros.  Otherwise the loader
>                  * can leave garbage here.
>                  */
> -               . = __vvar_beginning_hack + PAGE_SIZE;
> +               . = __vvar_beginning_hack + VVAR_TOTAL_SIZE;
>         } :data
>
> -       . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
> -
>         /* Init code and data - will be freed after init */
>         . = ALIGN(PAGE_SIZE);
>         .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
> diff --git a/arch/x86/vdso/vdso-layout.lds.S b/arch/x86/vdso/vdso-layout.lds.S
> index de2c921..acaf8ce 100644
> --- a/arch/x86/vdso/vdso-layout.lds.S
> +++ b/arch/x86/vdso/vdso-layout.lds.S
> @@ -1,4 +1,5 @@
>  #include <asm/vdso.h>
> +#include <asm/asm-offsets.h>
>
>  /*
>   * Linker script for vDSO.  This is an ELF shared object prelinked to
> @@ -25,7 +26,7 @@ SECTIONS
>          * segment.
>          */
>
> -       vvar_start = . - 2 * PAGE_SIZE;
> +       vvar_start = . - (VVAR_TOTAL_SIZE + PAGE_SIZE);
>         vvar_page = vvar_start;
>
>         /* Place all vvars at the offsets in asm/vvar.h. */
> @@ -35,7 +36,7 @@ SECTIONS
>  #undef __VVAR_KERNEL_LDS
>  #undef EMIT_VVAR
>
> -       hpet_page = vvar_start + PAGE_SIZE;
> +       hpet_page = vvar_start + VVAR_TOTAL_SIZE;
>
>         . = SIZEOF_HEADERS;
>
> diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
> index 970463b..fc37067 100644
> --- a/arch/x86/vdso/vma.c
> +++ b/arch/x86/vdso/vma.c
> @@ -16,6 +16,7 @@
>  #include <asm/vdso.h>
>  #include <asm/page.h>
>  #include <asm/hpet.h>
> +#include <asm/asm-offsets.h>
>
>  #if defined(CONFIG_X86_64)
>  unsigned int __read_mostly vdso64_enabled = 1;
> @@ -150,7 +151,7 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
>                 ret = remap_pfn_range(vma,
>                                       text_start + image->sym_vvar_page,
>                                       __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
> -                                     PAGE_SIZE,
> +                                     VVAR_TOTAL_SIZE,
>                                       PAGE_READONLY);
>
>         if (ret)
> --
> 1.8.1
>



-- 
Andy Lutomirski
AMA Capital Management, LLC
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ