Message-ID: <20171110121756.t7mn7bb4gy3rnw2w@hirez.programming.kicks-ass.net>
Date:   Fri, 10 Nov 2017 13:17:56 +0100
From:   Peter Zijlstra <peterz@...radead.org>
To:     Dave Hansen <dave.hansen@...ux.intel.com>
Cc:     linux-kernel@...r.kernel.org, linux-mm@...ck.org, hughd@...gle.com,
        moritz.lipp@...k.tugraz.at, daniel.gruss@...k.tugraz.at,
        michael.schwarz@...k.tugraz.at, richard.fellner@...dent.tugraz.at,
        luto@...nel.org, torvalds@...ux-foundation.org,
        keescook@...gle.com, x86@...nel.org
Subject: Re: [PATCH 18/30] x86, kaiser: map virtually-addressed performance
 monitoring buffers

On Wed, Nov 08, 2017 at 11:47:20AM -0800, Dave Hansen wrote:
> +static
> +DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(struct debug_store, cpu_debug_store);
> +
>  /* The size of a BTS record in bytes: */
>  #define BTS_RECORD_SIZE		24
>  
> @@ -278,6 +282,39 @@ void fini_debug_store_on_cpu(int cpu)
>  
>  static DEFINE_PER_CPU(void *, insn_buffer);
>  
> +static void *dsalloc(size_t size, gfp_t flags, int node)
> +{
> +#ifdef CONFIG_KAISER
> +	unsigned int order = get_order(size);
> +	struct page *page;
> +	unsigned long addr;
> +
> +	page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
> +	if (!page)
> +		return NULL;
> +	addr = (unsigned long)page_address(page);
> +	if (kaiser_add_mapping(addr, size, __PAGE_KERNEL | _PAGE_GLOBAL) < 0) {
> +		__free_pages(page, order);
> +		addr = 0;
> +	}
> +	return (void *)addr;
> +#else
> +	return kmalloc_node(size, flags | __GFP_ZERO, node);
> +#endif
> +}
> +
> +static void dsfree(const void *buffer, size_t size)
> +{
> +#ifdef CONFIG_KAISER
> +	if (!buffer)
> +		return;
> +	kaiser_remove_mapping((unsigned long)buffer, size);
> +	free_pages((unsigned long)buffer, get_order(size));
> +#else
> +	kfree(buffer);
> +#endif
> +}

You might as well use __alloc_pages_node() / free_pages()
unconditionally. Those buffers are at least one page in size.

That should also get rid of the #ifdef muck.
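
Something like this (untested sketch; it assumes kaiser_add_mapping()
and kaiser_remove_mapping() have no-op stubs for CONFIG_KAISER=n, which
may still need adding):

static void *dsalloc(size_t size, gfp_t flags, int node)
{
	unsigned int order = get_order(size);
	struct page *page;
	unsigned long addr;

	/* These buffers are at least a page, so allocate pages
	 * unconditionally, KAISER or not. */
	page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
	if (!page)
		return NULL;

	addr = (unsigned long)page_address(page);
	if (kaiser_add_mapping(addr, size, __PAGE_KERNEL | _PAGE_GLOBAL) < 0) {
		__free_pages(page, order);
		return NULL;
	}
	return (void *)addr;
}

static void dsfree(const void *buffer, size_t size)
{
	if (!buffer)
		return;

	kaiser_remove_mapping((unsigned long)buffer, size);
	free_pages((unsigned long)buffer, get_order(size));
}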

>  static int alloc_ds_buffer(int cpu)
>  {
> -	int node = cpu_to_node(cpu);
> -	struct debug_store *ds;
> -
> -	ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node);
> -	if (unlikely(!ds))
> -		return -ENOMEM;
> +	struct debug_store *ds = per_cpu_ptr(&cpu_debug_store, cpu);
>  
> +	memset(ds, 0, sizeof(*ds));

Why the memset()? Isn't static per-cpu memory 0-initialized?
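
If it is (it starts out zeroed like any other static storage), the
memset() could go entirely -- unless stale DS contents can survive a
CPU going offline and back online. Sketch, assuming the tail of the
function still does the usual per_cpu(cpu_hw_events, cpu).ds
assignment:

static int alloc_ds_buffer(int cpu)
{
	/* Static per-cpu memory is zeroed at boot; no memset()
	 * needed on first use. Re-zeroing would only matter across
	 * CPU hotplug. */
	struct debug_store *ds = per_cpu_ptr(&cpu_debug_store, cpu);

	per_cpu(cpu_hw_events, cpu).ds = ds;
	return 0;
}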
