Date:   Wed, 24 Aug 2022 12:58:59 +0900
From:   Hyeonggon Yoo <42.hyeyoo@...il.com>
To:     Vlastimil Babka <vbabka@...e.cz>
Cc:     Christoph Lameter <cl@...ux.com>,
        Pekka Enberg <penberg@...nel.org>,
        David Rientjes <rientjes@...gle.com>,
        Joonsoo Kim <iamjoonsoo.kim@....com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Roman Gushchin <roman.gushchin@...ux.dev>, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH v4 00/17] common kmalloc v4

On Tue, Aug 23, 2022 at 05:16:17PM +0200, Vlastimil Babka wrote:
> On 8/17/22 12:18, Hyeonggon Yoo wrote:
> > v3: https://lore.kernel.org/lkml/20220712133946.307181-1-42.hyeyoo@gmail.com/
> > 
> > Hello, this is common kmalloc v4.
> > Please review and consider applying.
> 
> Thanks, added to slab.git for-6.1/common_kmalloc and merged to for-next!

Thanks!

But please see these:
	https://lore.kernel.org/linux-mm/YwWfr8ATVx2Ag94z@hyeyoo/
	https://lore.kernel.org/lkml/20220824134530.1b10e768@canb.auug.org.au/T/#u

I've fixed those, so please pull this:
	https://github.com/hygoni/linux.git slab-common-v4r1

git range-diff	for-6.1/common_kmalloc~17...for-6.1/common_kmalloc \
		slab-common-v4r1~17...slab-common-v4r1:

 1:  0276f0da97e3 =  1:  0276f0da97e3 mm/slab: move NUMA-related code to __do_cache_alloc()
 2:  d5ea00e8d8c9 =  2:  d5ea00e8d8c9 mm/slab: cleanup slab_alloc() and slab_alloc_node()
 3:  48c55c42e6b8 =  3:  48c55c42e6b8 mm/slab_common: remove CONFIG_NUMA ifdefs for common kmalloc functions
 4:  cd8523b488ec =  4:  cd8523b488ec mm/slab_common: cleanup kmalloc_track_caller()
 5:  0b92d497e03a =  5:  0b92d497e03a mm/sl[au]b: factor out __do_kmalloc_node()
 6:  d43649c0f472 =  6:  d43649c0f472 mm/slab_common: fold kmalloc_order_trace() into kmalloc_large()
 7:  cd6d756d6118 =  7:  cd6d756d6118 mm/slub: move kmalloc_large_node() to slab_common.c
 8:  fe8f3819416e !  8:  ec277200c5dd mm/slab_common: kmalloc_node: pass large requests to page allocator
    @@ mm/slab_common.c: void *kmalloc_large(size_t size, gfp_t flags)
      EXPORT_SYMBOL(kmalloc_large);

     -void *kmalloc_large_node(size_t size, gfp_t flags, int node)
    -+void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
    ++static void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
      {
      	struct page *page;
      	void *ptr = NULL;
 9:  cc40615623ed !  9:  3d1d49576f4a mm/slab_common: cleanup kmalloc_large()
    @@ mm/slab_common.c: gfp_t kmalloc_fix_flags(gfp_t flags)
     -}
     -EXPORT_SYMBOL(kmalloc_large);

    - void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
    + static void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
      {
    -@@ mm/slab_common.c: void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
    +@@ mm/slab_common.c: static void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
      	void *ptr = NULL;
      	unsigned int order = get_order(size);

    @@ mm/slab_common.c: void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int
      	flags |= __GFP_COMP;
      	page = alloc_pages_node(node, flags, order);
      	if (page) {
    -@@ mm/slab_common.c: void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
    +@@ mm/slab_common.c: static void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
      	return ptr;
      }

10:  e14d748cf9ad = 10:  d6d55b2e745a mm/slab: kmalloc: pass requests larger than order-1 page to page allocator
11:  84000279b448 = 11:  28c1aabc9f73 mm/sl[au]b: introduce common alloc/free functions without tracepoint
12:  79c7527b9805 ! 12:  7fefa4235ba9 mm/sl[au]b: generalize kmalloc subsystem
    @@ mm/slab_common.c: void free_large_kmalloc(struct folio *folio, void *object)
     +
     +/**
     + * kfree - free previously allocated memory
    -+ * @objp: pointer returned by kmalloc.
    ++ * @object: pointer returned by kmalloc.
     + *
    -+ * If @objp is NULL, no operation is performed.
    ++ * If @object is NULL, no operation is performed.
     + *
     + * Don't free memory not originally allocated by kmalloc()
     + * or you will run into trouble.
    @@ mm/slab_common.c: void free_large_kmalloc(struct folio *folio, void *object)
     +
     +/**
     + * __ksize -- Uninstrumented ksize.
    -+ * @objp: pointer to the object
    ++ * @object: pointer to the object
     + *
     + * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
     + * safety checks as ksize() with KASAN instrumentation enabled.
     + *
    -+ * Return: size of the actual memory used by @objp in bytes
    ++ * Return: size of the actual memory used by @object in bytes
     + */
     +size_t __ksize(const void *object)
     +{
    @@ mm/slab_common.c: gfp_t kmalloc_fix_flags(gfp_t flags)
       * know the allocation order to free the pages properly in kfree.
       */

    --void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
    -+void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
    +-static void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
    ++static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
      {
      	struct page *page;
      	void *ptr = NULL;
    -@@ mm/slab_common.c: void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
    +@@ mm/slab_common.c: static void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)

      void *kmalloc_large(size_t size, gfp_t flags)
      {
13:  31be83f97c43 = 13:  446064fdf403 mm/sl[au]b: cleanup kmem_cache_alloc[_node]_trace()
14:  583b9ef311da = 14:  c923544d6d61 mm/slab_common: unify NUMA and UMA version of tracepoints
15:  d0b3552d07e0 = 15:  72633319472e mm/slab_common: drop kmem_alloc & avoid dereferencing fields when not using
16:  0db36c104255 ! 16:  c9b5ded32cc6 mm/slab_common: move declaration of __ksize() to mm/slab.h
    @@ mm/slab_common.c: void kfree(const void *object)

     -/**
     - * __ksize -- Uninstrumented ksize.
    -- * @objp: pointer to the object
    +- * @object: pointer to the object
     - *
     - * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
     - * safety checks as ksize() with KASAN instrumentation enabled.
     - *
    -- * Return: size of the actual memory used by @objp in bytes
    +- * Return: size of the actual memory used by @object in bytes
     - */
     +/* Uninstrumented ksize. Only called by KASAN. */
      size_t __ksize(const void *object)
17:  b261334803b4 = 17:  0248c8a1af52 mm/sl[au]b: check if large object is valid in __ksize()
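
(In the range-diff output above, '=' marks patches that are unchanged
between the two ranges, while '!' marks patches whose diff content
changed.) As a minimal sketch, assuming for-6.1/common_kmalloc from
slab.git is already present in the local tree, the updated branch could
be fetched and the same comparison reproduced with something like:

	git fetch https://github.com/hygoni/linux.git \
		slab-common-v4r1:slab-common-v4r1
	git range-diff for-6.1/common_kmalloc~17..for-6.1/common_kmalloc \
		slab-common-v4r1~17..slab-common-v4r1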

-- 
Thanks,
Hyeonggon
