Date:   Tue, 5 Dec 2023 21:27:27 +0800
From:   Chengming Zhou <chengming.zhou@...ux.dev>
To:     Vlastimil Babka <vbabka@...e.cz>, Christoph Lameter <cl@...ux.com>,
        Pekka Enberg <penberg@...nel.org>,
        David Rientjes <rientjes@...gle.com>,
        Joonsoo Kim <iamjoonsoo.kim@....com>
Cc:     Andrew Morton <akpm@...ux-foundation.org>,
        Roman Gushchin <roman.gushchin@...ux.dev>,
        Hyeonggon Yoo <42.hyeyoo@...il.com>,
        Alexander Potapenko <glider@...gle.com>,
        Marco Elver <elver@...gle.com>,
        Dmitry Vyukov <dvyukov@...gle.com>, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org, kasan-dev@...glegroups.com
Subject: Re: [PATCH 4/4] mm/slub: free KFENCE objects in slab_free_hook()

On 2023/12/5 03:34, Vlastimil Babka wrote:
> When freeing an object that was allocated from KFENCE, we do that in the
> slowpath __slab_free(), relying on the fact that KFENCE "slab" cannot be
> the cpu slab, so the fastpath has to fall back to the slowpath.
> 
> This optimization doesn't help much, though, because is_kfence_address()
> is checked earlier anyway during the free hook processing or detached
> freelist building. Thus we can simplify the code by making the
> slab_free_hook() free the KFENCE object immediately, similarly to KASAN
> quarantine.
> 
> In slab_free_hook() we can place kfence_free() above init processing, as
> callers have been making sure to set init to false for KFENCE objects.
> This simplifies slab_free(). It also places kfence_free() above
> kasan_slab_free(), which is OK as that skips KFENCE objects anyway.
> 
> While at it, also determine the init value in slab_free_freelist_hook()
> outside of the loop.
> 
> This change will also make introducing per cpu array caches easier.
> 
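For reference, the order of checks in slab_free_hook() after this patch
looks roughly like the following minimal userspace sketch. The stubbed
helpers and the plain init memset are illustrative assumptions only, not
the kernel implementation (which, e.g., may integrate the memset into
KASAN and resets pointer tags first):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct kmem_cache { size_t object_size; };

/* Stub: returns true if x is a KFENCE object handed back to KFENCE. */
static bool kfence_free(void *x) { (void)x; return false; }

/* Stub: returns true if KASAN delayed the object's reuse (quarantine). */
static bool kasan_slab_free(struct kmem_cache *s, void *x, bool init)
{
	(void)s; (void)x; (void)init;
	return false;
}

/* Returns true if freeing can proceed, false if the object was consumed
 * by KFENCE or its reuse was delayed by KASAN quarantine. */
static bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
{
	/* KFENCE objects are freed first, before any init processing;
	 * callers make sure to pass init == false for them. */
	if (kfence_free(x))
		return false;

	/* init-on-free: zero the object before the KASAN free hook. */
	if (init)
		memset(x, 0, s->object_size);

	/* KASAN free hook; it skips KFENCE objects anyway. */
	return !kasan_slab_free(s, x, init);
}

int main(void)
{
	struct kmem_cache s = { .object_size = 64 };
	char obj[64];

	/* A regular object with init-on-free: freeing can proceed. */
	return slab_free_hook(&s, obj, true) ? 0 : 1;
}
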
> Tested-by: Marco Elver <elver@...gle.com>
> Signed-off-by: Vlastimil Babka <vbabka@...e.cz>
> ---
>  mm/slub.c | 22 ++++++++++------------
>  1 file changed, 10 insertions(+), 12 deletions(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index ed2fa92e914c..e38c2b712f6c 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2039,7 +2039,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
>   * production configuration these hooks all should produce no code at all.
>   *
>   * Returns true if freeing of the object can proceed, false if its reuse
> - * was delayed by KASAN quarantine.
> + * was delayed by KASAN quarantine, or it was returned to KFENCE.
>   */
>  static __always_inline
>  bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
> @@ -2057,6 +2057,9 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
>  		__kcsan_check_access(x, s->object_size,
>  				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
>  
> +	if (kfence_free(kasan_reset_tag(x)))

I'm wondering if "kasan_reset_tag()" is needed here?

The patch looks good to me!

Reviewed-by: Chengming Zhou <zhouchengming@...edance.com>

Thanks.

> +		return false;
> +
>  	/*
>  	 * As memory initialization might be integrated into KASAN,
>  	 * kasan_slab_free and initialization memset's must be
> @@ -2086,23 +2089,25 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
>  	void *object;
>  	void *next = *head;
>  	void *old_tail = *tail;
> +	bool init;
>  
>  	if (is_kfence_address(next)) {
>  		slab_free_hook(s, next, false);
> -		return true;
> +		return false;
>  	}
>  
>  	/* Head and tail of the reconstructed freelist */
>  	*head = NULL;
>  	*tail = NULL;
>  
> +	init = slab_want_init_on_free(s);
> +
>  	do {
>  		object = next;
>  		next = get_freepointer(s, object);
>  
>  		/* If object's reuse doesn't have to be delayed */
> -		if (likely(slab_free_hook(s, object,
> -					  slab_want_init_on_free(s)))) {
> +		if (likely(slab_free_hook(s, object, init))) {
>  			/* Move object to the new freelist */
>  			set_freepointer(s, object, *head);
>  			*head = object;
> @@ -4103,9 +4108,6 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
>  
>  	stat(s, FREE_SLOWPATH);
>  
> -	if (kfence_free(head))
> -		return;
> -
>  	if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
>  		free_to_partial_list(s, slab, head, tail, cnt, addr);
>  		return;
> @@ -4290,13 +4292,9 @@ static __fastpath_inline
>  void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
>  	       unsigned long addr)
>  {
> -	bool init;
> -
>  	memcg_slab_free_hook(s, slab, &object, 1);
>  
> -	init = !is_kfence_address(object) && slab_want_init_on_free(s);
> -
> -	if (likely(slab_free_hook(s, object, init)))
> +	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
>  		do_slab_free(s, slab, object, object, 1, addr);
>  }
>  
> 
