Date:   Sun, 30 Oct 2022 03:59:22 +0100
From:   Marco Elver <elver@...gle.com>
To:     andrey.konovalov@...ux.dev
Cc:     Andrey Konovalov <andreyknvl@...il.com>,
        Alexander Potapenko <glider@...gle.com>,
        Dmitry Vyukov <dvyukov@...gle.com>,
        Andrey Ryabinin <ryabinin.a.a@...il.com>,
        kasan-dev@...glegroups.com, Peter Collingbourne <pcc@...gle.com>,
        Evgenii Stepanov <eugenis@...gle.com>,
        Florian Mayer <fmayer@...gle.com>,
        Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org,
        Andrey Konovalov <andreyknvl@...gle.com>
Subject: Re: [PATCH] kasan: allow sampling page_alloc allocations for HW_TAGS

On Thu, Oct 27, 2022 at 10:10PM +0200, andrey.konovalov@...ux.dev wrote:
> From: Andrey Konovalov <andreyknvl@...gle.com>
> 
> Add a new boot parameter called kasan.page_alloc.sample, which makes
> Hardware Tag-Based KASAN tag only every Nth page_alloc allocation.
> 
> As Hardware Tag-Based KASAN is intended to be used in production, its
> performance impact is crucial. As page_alloc allocations tend to be big,
> tagging and checking all such allocations introduces a significant
> slowdown in some testing scenarios. The new flag helps alleviate
> that slowdown.
> 
> Enabling page_alloc sampling has a downside: KASAN will miss bad accesses
> to a page_alloc allocation that has not been tagged.
> 
> Signed-off-by: Andrey Konovalov <andreyknvl@...gle.com>
> ---
>  Documentation/dev-tools/kasan.rst |  4 +++
>  include/linux/kasan.h             |  7 ++---
>  mm/kasan/common.c                 |  9 +++++--
>  mm/kasan/hw_tags.c                | 26 +++++++++++++++++++
>  mm/kasan/kasan.h                  | 15 +++++++++++
>  mm/page_alloc.c                   | 43 +++++++++++++++++++++----------
>  6 files changed, 85 insertions(+), 19 deletions(-)
> 
> diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst
> index 5c93ab915049..bd97301845ef 100644
> --- a/Documentation/dev-tools/kasan.rst
> +++ b/Documentation/dev-tools/kasan.rst
> @@ -140,6 +140,10 @@ disabling KASAN altogether or controlling its features:
>  - ``kasan.vmalloc=off`` or ``=on`` disables or enables tagging of vmalloc
>    allocations (default: ``on``).
>  
> +- ``kasan.page_alloc.sample=<sampling frequency>`` makes KASAN tag only

Frequency is the number of samples per frame (where the frame is a unit
of time, or, when used non-temporally as here, a population size).

[1] https://en.wikipedia.org/wiki/Systematic_sampling

You're using it as an interval, so I'd just replace uses of frequency
with "interval" appropriately here and elsewhere.

> +  every Nth page_alloc allocation, where N is the value of the parameter
> +  (default: ``1``).
> +
>  Error reports
>  ~~~~~~~~~~~~~
>  
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index d811b3d7d2a1..d45d45dfd007 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -120,12 +120,13 @@ static __always_inline void kasan_poison_pages(struct page *page,
>  		__kasan_poison_pages(page, order, init);
>  }
>  
> -void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
> -static __always_inline void kasan_unpoison_pages(struct page *page,
> +bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
> +static __always_inline bool kasan_unpoison_pages(struct page *page,
>  						 unsigned int order, bool init)
>  {
>  	if (kasan_enabled())
> -		__kasan_unpoison_pages(page, order, init);
> +		return __kasan_unpoison_pages(page, order, init);
> +	return false;
>  }
>  
>  void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 833bf2cfd2a3..1f30080a7a4c 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -95,19 +95,24 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
>  }
>  #endif /* CONFIG_KASAN_STACK */
>  
> -void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
> +bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
>  {
>  	u8 tag;
>  	unsigned long i;
>  
>  	if (unlikely(PageHighMem(page)))
> -		return;
> +		return false;
> +
> +	if (!kasan_sample_page_alloc())
> +		return false;
>  
>  	tag = kasan_random_tag();
>  	kasan_unpoison(set_tag(page_address(page), tag),
>  		       PAGE_SIZE << order, init);
>  	for (i = 0; i < (1 << order); i++)
>  		page_kasan_tag_set(page + i, tag);
> +
> +	return true;
>  }
>  
>  void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
> diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
> index b22c4f461cb0..aa3b5a080297 100644
> --- a/mm/kasan/hw_tags.c
> +++ b/mm/kasan/hw_tags.c
> @@ -59,6 +59,11 @@ EXPORT_SYMBOL_GPL(kasan_mode);
>  /* Whether to enable vmalloc tagging. */
>  DEFINE_STATIC_KEY_TRUE(kasan_flag_vmalloc);
>  
> +/* Frequency of page_alloc allocation poisoning. */
> +unsigned long kasan_page_alloc_sample = 1;
> +
> +DEFINE_PER_CPU(unsigned long, kasan_page_alloc_count);
> +
>  /* kasan=off/on */
>  static int __init early_kasan_flag(char *arg)
>  {
> @@ -122,6 +127,27 @@ static inline const char *kasan_mode_info(void)
>  		return "sync";
>  }
>  
> +/* kasan.page_alloc.sample=<sampling frequency> */
> +static int __init early_kasan_flag_page_alloc_sample(char *arg)
> +{
> +	int rv;
> +
> +	if (!arg)
> +		return -EINVAL;
> +
> +	rv = kstrtoul(arg, 0, &kasan_page_alloc_sample);
> +	if (rv)
> +		return rv;
> +
> +	if (!kasan_page_alloc_sample) {
> +		kasan_page_alloc_sample = 1;
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +early_param("kasan.page_alloc.sample", early_kasan_flag_page_alloc_sample);
> +
>  /*
>   * kasan_init_hw_tags_cpu() is called for each CPU.
>   * Not marked as __init as a CPU can be hot-plugged after boot.
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index abbcc1b0eec5..ee67eb35f4a7 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -42,6 +42,9 @@ enum kasan_mode {
>  
>  extern enum kasan_mode kasan_mode __ro_after_init;
>  
> +extern unsigned long kasan_page_alloc_sample;
> +DECLARE_PER_CPU(unsigned long, kasan_page_alloc_count);
> +
>  static inline bool kasan_vmalloc_enabled(void)
>  {
>  	return static_branch_likely(&kasan_flag_vmalloc);
> @@ -57,6 +60,13 @@ static inline bool kasan_sync_fault_possible(void)
>  	return kasan_mode == KASAN_MODE_SYNC || kasan_mode == KASAN_MODE_ASYMM;
>  }
>  
> +static inline bool kasan_sample_page_alloc(void)
> +{
> +	unsigned long *count = this_cpu_ptr(&kasan_page_alloc_count);

this_cpu_inc_return()

Without it, you need to ensure preemption is disabled around here.
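
Something like this (just a sketch, keeping the '%' and the names from
the patch for now):

	static inline bool kasan_sample_page_alloc(void)
	{
		/*
		 * this_cpu_inc_return() performs the per-CPU
		 * read-modify-write safely with respect to preemption,
		 * so no explicit preempt_disable() is needed here.
		 */
		return (this_cpu_inc_return(kasan_page_alloc_count) - 1) %
		       kasan_page_alloc_sample == 0;
	}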

> +
> +	return (*count)++ % kasan_page_alloc_sample == 0;

Doing '%' is a potentially costly operation when called in a fast path.

We can generate better code with (rename 'count' -> 'skip'):

	long skip_next = this_cpu_dec_return(kasan_page_alloc_skip);

	if (skip_next < 0) {
		this_cpu_write(kasan_page_alloc_skip, kasan_page_alloc_sample - 1);
		return true;
	}

	return false;

It's also important to switch the counter to a 'long'; otherwise you'd
have to pre-initialize all of the per-CPU counters to something non-zero
to avoid wraparound.
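
For completeness, the counter change itself would then look roughly like
this (a sketch only, reusing the declaration sites from your patch):

	/*
	 * mm/kasan/hw_tags.c: signed so that the initial 0 goes to -1 on
	 * the first allocation instead of wrapping around to ULONG_MAX.
	 */
	DEFINE_PER_CPU(long, kasan_page_alloc_skip);

	/* mm/kasan/kasan.h */
	DECLARE_PER_CPU(long, kasan_page_alloc_skip);

With that, each CPU samples its very first page_alloc allocation and
then every kasan_page_alloc_sample-th one after it.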
