Message-ID: <CANpmjNN-RCZEzU8tLsUVGLtuDgXMRjddOW3fj6bEzCw2+FSiNg@mail.gmail.com>
Date:   Wed, 29 Nov 2023 13:00:00 +0100
From:   Marco Elver <elver@...gle.com>
To:     Vlastimil Babka <vbabka@...e.cz>
Cc:     Christoph Lameter <cl@...ux.com>,
        Pekka Enberg <penberg@...nel.org>,
        David Rientjes <rientjes@...gle.com>,
        Joonsoo Kim <iamjoonsoo.kim@....com>,
        Matthew Wilcox <willy@...radead.org>,
        "Liam R. Howlett" <Liam.Howlett@...cle.com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Roman Gushchin <roman.gushchin@...ux.dev>,
        Hyeonggon Yoo <42.hyeyoo@...il.com>,
        Alexander Potapenko <glider@...gle.com>,
        Dmitry Vyukov <dvyukov@...gle.com>, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org, maple-tree@...ts.infradead.org,
        kasan-dev@...glegroups.com
Subject: Re: [PATCH RFC v3 4/9] mm/slub: free KFENCE objects in slab_free_hook()

On Wed, 29 Nov 2023 at 10:53, Vlastimil Babka <vbabka@...e.cz> wrote:
>
> When freeing an object that was allocated from KFENCE, we do that in the
> slowpath __slab_free(), relying on the fact that a KFENCE "slab" cannot be
> the cpu slab, so the fastpath has to fall back to the slowpath.
>
> This optimization doesn't help much though, because is_kfence_address()
> is checked earlier anyway during free hook processing or detached
> freelist building. Thus we can simplify the code by making
> slab_free_hook() free the KFENCE object immediately, similarly to the
> KASAN quarantine.
>
> In slab_free_hook() we can place kfence_free() above the init
> processing, as callers have been making sure to set init to false for
> KFENCE objects; this simplifies slab_free(). It also places kfence_free()
> above kasan_slab_free(), which is fine as the latter skips KFENCE
> objects anyway.
>
> While at it, also determine the init value in slab_free_freelist_hook()
> outside of the loop.
>
> This change will also make introducing per-cpu array caches easier.
>
> Signed-off-by: Vlastimil Babka <vbabka@...e.cz>

Tested-by: Marco Elver <elver@...gle.com>

> ---
>  mm/slub.c | 21 ++++++++++-----------
>  1 file changed, 10 insertions(+), 11 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index 7d23f10d42e6..59912a376c6d 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -1772,7 +1772,7 @@ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
>   * production configuration these hooks all should produce no code at all.
>   *
>   * Returns true if freeing of the object can proceed, false if its reuse
> - * was delayed by KASAN quarantine.
> + * was delayed by KASAN quarantine, or it was returned to KFENCE.
>   */
>  static __always_inline
>  bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
> @@ -1790,6 +1790,9 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
>                 __kcsan_check_access(x, s->object_size,
>                                      KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
>
> +       if (kfence_free(kasan_reset_tag(x)))
> +               return false;
> +
>         /*
>          * As memory initialization might be integrated into KASAN,
>          * kasan_slab_free and initialization memset's must be
> @@ -1819,22 +1822,25 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
>         void *object;
>         void *next = *head;
>         void *old_tail = *tail;
> +       bool init;
>
>         if (is_kfence_address(next)) {
>                 slab_free_hook(s, next, false);
> -               return true;
> +               return false;
>         }
>
>         /* Head and tail of the reconstructed freelist */
>         *head = NULL;
>         *tail = NULL;
>
> +       init = slab_want_init_on_free(s);
> +
>         do {
>                 object = next;
>                 next = get_freepointer(s, object);
>
>                 /* If object's reuse doesn't have to be delayed */
> -               if (slab_free_hook(s, object, slab_want_init_on_free(s))) {
> +               if (slab_free_hook(s, object, init)) {
>                         /* Move object to the new freelist */
>                         set_freepointer(s, object, *head);
>                         *head = object;
> @@ -3619,9 +3625,6 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
>
>         stat(s, FREE_SLOWPATH);
>
> -       if (kfence_free(head))
> -               return;
> -
>         if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
>                 free_to_partial_list(s, slab, head, tail, cnt, addr);
>                 return;
> @@ -3806,13 +3809,9 @@ static __fastpath_inline
>  void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
>                unsigned long addr)
>  {
> -       bool init;
> -
>         memcg_slab_free_hook(s, slab, &object, 1);
>
> -       init = !is_kfence_address(object) && slab_want_init_on_free(s);
> -
> -       if (likely(slab_free_hook(s, object, init)))
> +       if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
>                 do_slab_free(s, slab, object, object, 1, addr);
>  }
>
>
> --
> 2.43.0
>
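
For readers without the tree handy, below is a minimal userspace sketch of
the control flow the patch ends up with. Everything here is a toy model:
the functions only mimic the kernel helpers of the same names (the "KFENCE
pool" is a single global pointer, and this kasan_slab_free() never
quarantines), so it illustrates only the reordering, not real SLUB
behavior.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct kmem_cache {
	size_t object_size;
	bool want_init_on_free;
};

static void *kfence_object;	/* the single object our toy KFENCE pool owns */

/* Stand-in for kfence_free(): true if @x was KFENCE-backed and is now freed. */
static bool kfence_free(void *x)
{
	if (x != kfence_object)
		return false;
	printf("kfence_free: %p returned to the KFENCE pool\n", x);
	return true;
}

/* Stand-in for kasan_slab_free(): this model never delays reuse. */
static bool kasan_slab_free(struct kmem_cache *s, void *x)
{
	(void)s;
	(void)x;
	return false;
}

/* Mirrors the post-patch hook order: KFENCE first, then init, then KASAN. */
static bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
{
	if (kfence_free(x))
		return false;	/* object fully freed; skip init and KASAN */

	if (init)
		memset(x, 0, s->object_size);	/* models init_on_free */

	return !kasan_slab_free(s, x);	/* false if reuse must be delayed */
}

/* Post-patch slab_free(): no is_kfence_address() special-casing needed. */
static void slab_free(struct kmem_cache *s, void *object)
{
	if (slab_free_hook(s, object, s->want_init_on_free))
		printf("slab_free: %p continues to the regular free path\n",
		       object);
}

int main(void)
{
	struct kmem_cache cache = { .object_size = 32, .want_init_on_free = true };
	char regular[32], kfence_backed[32];

	kfence_object = kfence_backed;
	slab_free(&cache, regular);		/* init memset + regular freeing */
	slab_free(&cache, kfence_backed);	/* intercepted by kfence_free() */
	return 0;
}

Running the model shows the KFENCE-backed object being intercepted before
the init memset ever runs, which is exactly why callers no longer need to
clear init for KFENCE objects themselves.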
