Date:   Wed, 11 Jan 2017 20:41:53 +0100
From:   Michal Hocko <mhocko@...nel.org>
To:     LKML <linux-kernel@...r.kernel.org>
Cc:     Andrew Morton <akpm@...ux-foundation.org>,
        Arnd Bergmann <arnd@...db.de>,
        Alexander Potapenko <glider@...gle.com>,
        Linus Torvalds <torvalds@...ux-foundation.org>
Subject: Re: [RFC PATCH] lib/bug: use stackdepot for WARN*ONCE to make it
 more useful

Has this just fallen through the cracks, or does the silence mean that
people do not really care about the current WARN_*ONCE behavior?

On Tue 03-01-17 14:44:24, Michal Hocko wrote:
> From: Michal Hocko <mhocko@...e.com>
> 
> One of the main problems with the WARN_*ONCE style of warnings is that
> it can easily hide different call paths which lead to the warning. This
> is especially true for library functions. Rebooting after each offender
> hits the particular WARN_*ONCE is just not feasible.
> 
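> As a hypothetical illustration (lib_helper, caller_a and caller_b are
> made-up names): the static __warned flag is shared by every call path
> that reaches the WARN site, so only the first offending caller's
> backtrace is ever printed:
> 
> 	static void lib_helper(int val)
> 	{
> 		/* a single static __warned flag serves all callers */
> 		WARN_ON_ONCE(val < 0);
> 	}
> 
> 	void caller_a(void) { lib_helper(-1); }	/* backtrace reported */
> 	void caller_b(void) { lib_helper(-1); }	/* swallowed today; reported
> 						   with this patch */
> 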
> This patch (ab)uses stackdepot to make WARN_*ONCE more usable. We can
> store in the stackdepot's stack_record whether a certain call path has
> already been seen. This way only unique stack traces will be reported.
> 
> On the other hand, this is not for free. WARN_*ONCE will consume more
> stack (at least WARN_STACK_DEPTH*sizeof(unsigned long) + sizeof(struct
> stack_trace)) and also sometimes requires allocating memory, which
> might fail. In those cases the warning will potentially be issued
> multiple times.
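> 
> For scale, an illustrative back-of-the-envelope number (assuming a
> 64-bit kernel, where sizeof(struct stack_trace) is roughly 24 bytes):
> with the WARN_STACK_DEPTH of 16 used below, that is 16 * 8 + 24 = 152
> bytes of extra stack per invocation.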
> 
> Signed-off-by: Michal Hocko <mhocko@...e.com>
> ---
> Hi,
> this is an (untested) RFC to show the idea. It has some known problems:
>  - depot_test_set_reported_stack is a GPL-only symbol, which is not
>    suitable for something as generic as WARN_*ONCE; then again, the rest
>    of the stackdepot is exported GPL-only as well and I haven't explored
>    why.
>  - there needs to be recursion detection, because nothing really prevents
>    depot_test_set_reported_stack from calling into WARN_*ONCE indirectly
>    (e.g. through a memory allocation); a possible guard is sketched
>    after this list.
>  - WARN_STACK_DEPTH doesn't cooperate well with other users of the stack
>    depot (namely KASAN_STACK_DEPTH)
>  - the configuration space would need consideration, because STACKDEPOT
>    currently cannot be selected manually
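> 
> On the recursion point, a minimal guard might look like the following
> untested sketch (not part of the patch; it ignores preemption and simply
> claims the stack was already reported while a lookup is in flight on the
> same CPU):
> 
> 	static DEFINE_PER_CPU(bool, in_depot_test);
> 
> 	static bool depot_test_set_reported_stack_guarded(int stack_depth)
> 	{
> 		bool seen;
> 
> 		/* re-entered via an allocation? claim "seen" to break the loop */
> 		if (this_cpu_xchg(in_depot_test, true))
> 			return true;
> 		seen = depot_test_set_reported_stack(stack_depth);
> 		this_cpu_write(in_depot_test, false);
> 		return seen;
> 	}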
> 
> There may be other problems as well, but I think the idea should at
> least be considered. So here it goes.
> 
> Are there any thoughts, comments?
> 
>  include/asm-generic/bug.h  | 22 +++++++++++++
>  include/linux/stackdepot.h |  2 ++
>  lib/stackdepot.c           | 79 ++++++++++++++++++++++++++++++++++++++--------
>  3 files changed, 90 insertions(+), 13 deletions(-)
> 
> diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
> index 6f96247226a4..0b19fddea7ff 100644
> --- a/include/asm-generic/bug.h
> +++ b/include/asm-generic/bug.h
> @@ -11,6 +11,7 @@
>  
>  #ifndef __ASSEMBLY__
>  #include <linux/kernel.h>
> +#include <linux/stackdepot.h>
>  
>  #ifdef CONFIG_BUG
>  
> @@ -112,6 +113,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
>  	unlikely(__ret_warn_on);					\
>  })
>  
> +#ifndef CONFIG_STACKDEPOT
>  #define WARN_ON_ONCE(condition)	({				\
>  	static bool __section(.data.unlikely) __warned;		\
>  	int __ret_warn_once = !!(condition);			\
> @@ -133,6 +135,26 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
>  	}							\
>  	unlikely(__ret_warn_once);				\
>  })
> +#else
> +#define WARN_STACK_DEPTH 16
> +#define WARN_ON_ONCE(condition)	({				\
> +	int __ret_warn_once = !!(condition);			\
> +								\
> +	if (unlikely(__ret_warn_once && !depot_test_set_reported_stack(WARN_STACK_DEPTH))) {		\
> +		WARN_ON(1);					\
> +	}							\
> +	unlikely(__ret_warn_once);				\
> +})
> +
> +#define WARN_ONCE(condition, format...)	({			\
> +	int __ret_warn_once = !!(condition);			\
> +								\
> +	if (unlikely(__ret_warn_once && !depot_test_set_reported_stack(WARN_STACK_DEPTH))) {		\
> +		WARN(1, format);				\
> +	}							\
> +	unlikely(__ret_warn_once);				\
> +})
> +#endif
>  
>  #define WARN_TAINT_ONCE(condition, taint, format...)	({	\
>  	static bool __section(.data.unlikely) __warned;		\
> diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
> index 7978b3e2c1e1..0e6e047c5e02 100644
> --- a/include/linux/stackdepot.h
> +++ b/include/linux/stackdepot.h
> @@ -29,4 +29,6 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags);
>  
>  void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace);
>  
> +bool depot_test_set_reported_stack(int stack_depth);
> +
>  #endif
> diff --git a/lib/stackdepot.c b/lib/stackdepot.c
> index f87d138e9672..39a471912aa1 100644
> --- a/lib/stackdepot.c
> +++ b/lib/stackdepot.c
> @@ -68,7 +68,10 @@ union handle_parts {
>  struct stack_record {
>  	struct stack_record *next;	/* Link in the hashtable */
>  	u32 hash;			/* Hash in the hastable */
> -	u32 size;			/* Number of frames in the stack */
> +	u32 size;			/* Number of frames in the stack - the top bit
> +					   encodes whether the stack has been seen already
> +					   by depot_test_set_reported_stack. Use entries_count
> +					   to get the value */
>  	union handle_parts handle;
>  	unsigned long entries[1];	/* Variable-sized array of entries. */
>  };
> @@ -80,6 +83,15 @@ static int next_slab_inited;
>  static size_t depot_offset;
>  static DEFINE_SPINLOCK(depot_lock);
>  
> +#define STACK_SEEN_BIT	31
> +#define STACK_SEEN_MASK (1u<<STACK_SEEN_BIT)
> +#define STACK_COUNT_MASK ~STACK_SEEN_MASK
> +
> +static inline u32 entries_count(struct stack_record *record)
> +{
> +	return record->size & STACK_COUNT_MASK;
> +}
> +
>  static bool init_stack_slab(void **prealloc)
>  {
>  	if (!*prealloc)
> @@ -172,7 +184,7 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
>  
>  	for (found = bucket; found; found = found->next) {
>  		if (found->hash == hash &&
> -		    found->size == size &&
> +		    entries_count(found) == size &&
>  		    !memcmp(entries, found->entries,
>  			    size * sizeof(unsigned long))) {
>  			return found;
> @@ -188,24 +200,16 @@ void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
>  	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
>  	struct stack_record *stack = slab + offset;
>  
> -	trace->nr_entries = trace->max_entries = stack->size;
> +	trace->nr_entries = trace->max_entries = entries_count(stack);
>  	trace->entries = stack->entries;
>  	trace->skip = 0;
>  }
>  EXPORT_SYMBOL_GPL(depot_fetch_stack);
>  
> -/**
> - * depot_save_stack - save stack in a stack depot.
> - * @trace - the stacktrace to save.
> - * @alloc_flags - flags for allocating additional memory if required.
> - *
> - * Returns the handle of the stack struct stored in depot.
> - */
> -depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
> +struct stack_record *__depot_save_stack(struct stack_trace *trace,
>  				    gfp_t alloc_flags)
>  {
>  	u32 hash;
> -	depot_stack_handle_t retval = 0;
>  	struct stack_record *found = NULL, **bucket;
>  	unsigned long flags;
>  	struct page *page = NULL;
> @@ -279,9 +283,58 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
>  		/* Nobody used this memory, ok to free it. */
>  		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
>  	}
> +fast_exit:
> +	return found;
> +}
> +
> +/**
> + * depot_save_stack - save stack in a stack depot.
> + * @trace - the stacktrace to save.
> + * @alloc_flags - flags for allocating additional memory if required.
> + *
> + * Returns the handle of the stack struct stored in depot.
> + */
> +depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
> +				    gfp_t alloc_flags)
> +{
> +	depot_stack_handle_t retval = 0;
> +	struct stack_record *found = NULL;
> +
> +	found = __depot_save_stack(trace, alloc_flags);
>  	if (found)
>  		retval = found->handle.handle;
> -fast_exit:
> +
>  	return retval;
>  }
>  EXPORT_SYMBOL_GPL(depot_save_stack);
> +
> +/* FIXME be careful about recursion */
> +bool depot_test_set_reported_stack(int stack_depth)
> +{
> +	unsigned long entries[stack_depth];
> +	struct stack_trace trace = {
> +		.nr_entries = 0,
> +		.entries = entries,
> +		.max_entries = stack_depth,
> +		.skip = 0
> +	};
> +	struct stack_record *record;
> +
> +	/* FIXME deduplicate save_stack from kasan.c */
> +	save_stack_trace(&trace);
> +	if (trace.nr_entries != 0 &&
> +	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
> +		trace.nr_entries--;
> +
> +	record = __depot_save_stack(&trace, GFP_ATOMIC);
> +	if (!record)
> +		return false;
> +
> +	/* This is racy but races should be too rare to matter */
> +	if (record->size & STACK_SEEN_MASK)
> +		return true;
> +
> +	record->size = STACK_SEEN_MASK | entries_count(record);
> +	return false;
> +}
> +EXPORT_SYMBOL_GPL(depot_test_set_reported_stack);
> -- 
> 2.11.0
> 

-- 
Michal Hocko
SUSE Labs
