Date:	Wed, 21 Aug 2013 22:19:52 -0700
From:	Yinghai Lu <yinghai@...nel.org>
To:	Wanpeng Li <liwanp@...ux.vnet.ibm.com>
Cc:	Dave Hansen <dave.hansen@...ux.intel.com>,
	Rik van Riel <riel@...hat.com>,
	Fengguang Wu <fengguang.wu@...el.com>,
	Joonsoo Kim <iamjoonsoo.kim@....com>,
	Johannes Weiner <hannes@...xchg.org>,
	Tejun Heo <tj@...nel.org>,
	Yasuaki Ishimatsu <isimatu.yasuaki@...fujitsu.com>,
	David Rientjes <rientjes@...gle.com>,
	KOSAKI Motohiro <kosaki.motohiro@...fujitsu.com>,
	Jiri Kosina <jkosina@...e.cz>, Linux MM <linux-mm@...ck.org>,
	Linux Kernel Mailing List <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v2 2/4] mm/sparse: introduce alloc_usemap_and_memmap

On Wed, Aug 21, 2013 at 12:29 AM, Wanpeng Li <liwanp@...ux.vnet.ibm.com> wrote:
> Hi Yinghai,
> On Tue, Aug 20, 2013 at 09:28:29PM -0700, Yinghai Lu wrote:
>>On Tue, Aug 20, 2013 at 8:11 PM, Wanpeng Li <liwanp@...ux.vnet.ibm.com> wrote:
>>> Hi Yinghai,
>>> On Tue, Aug 20, 2013 at 05:02:17PM -0700, Yinghai Lu wrote:
>>>>>> -     /* ok, last chunk */
>>>>>> -     sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
>>>>>> -                                      usemap_count, nodeid_begin);
>>>>>> +     alloc_usemap_and_memmap(usemap_map, true);
>>>>
>>>>alloc_usemap_and_memmap() is somewhat confusing.
>>>>
>>>>Please check if you can pass a function pointer instead of true/false.
>>>>
>>>
>>> sparse_early_usemaps_alloc_node and sparse_early_mem_maps_alloc_node are
>>> similar; however, one takes an unsigned long ** parameter and the other takes
>>> struct page **. A function pointer can't help here, can it? ;-)
>>
>>you could have one generic function pointer like
>>void *alloc_func(void *data);
>>
>>and in every alloc function, have its own struct data to pass in/out...
>>
>>Yinghai
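
A minimal sketch of the generic-callback idea quoted above (the names
alloc_func_t, usemap_alloc_args, and usemap_alloc are hypothetical, not
taken from the patch):

    typedef void (*alloc_func_t)(void *data);   /* one generic callback type */

    struct usemap_alloc_args {          /* per-callback in/out data */
        unsigned long **usemap_map;
        unsigned long pnum_begin;
        unsigned long pnum_end;
        unsigned long usemap_count;
        int nodeid;
    };

    /* wrapper with the generic signature; unpacks its own data struct */
    static void __init usemap_alloc(void *data)
    {
        struct usemap_alloc_args *args = data;

        sparse_early_usemaps_alloc_node(args->usemap_map, args->pnum_begin,
                                        args->pnum_end, args->usemap_count,
                                        args->nodeid);
    }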
>
> How about this?


From a78e12a9ff31f2a73b87145ce7ad943a0f712708 Mon Sep 17 00:00:00 2001
From: Wanpeng Li <liwanp@...ux.vnet.ibm.com>
Date: Wed, 21 Aug 2013 15:23:08 +0800
Subject: [PATCH] mm/sparse: introduce alloc_usemap_and_memmap fix

Pass a function pointer to alloc_usemap_and_memmap() instead of true/false.

Signed-off-by: Wanpeng Li <liwanp@...ux.vnet.ibm.com>
---
 mm/sparse.c |   54 +++++++++++++++++++++++++-----------------------------
 1 files changed, 25 insertions(+), 29 deletions(-)

diff --git a/mm/sparse.c b/mm/sparse.c
index 55e5752..06adf3c 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -339,14 +339,16 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */

-static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
+static void __init sparse_early_usemaps_alloc_node(void **usemap_map,
                  unsigned long pnum_begin,
                  unsigned long pnum_end,
                  unsigned long usemap_count, int nodeid)
 {
     void *usemap;
     unsigned long pnum;
+    unsigned long **map;
     int size = usemap_size();
+    map = (unsigned long **) usemap_map;

     usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
                               size * usemap_count);
@@ -358,9 +360,9 @@ static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
     for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
         if (!present_section_nr(pnum))
             continue;
-        usemap_map[pnum] = usemap;
+        map[pnum] = usemap;
         usemap += size;
-        check_usemap_section_nr(nodeid, usemap_map[pnum]);
+        check_usemap_section_nr(nodeid, map[pnum]);
     }
 }

@@ -430,23 +432,16 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */

 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
-static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
+static void __init sparse_early_mem_maps_alloc_node(void **map_map,
                  unsigned long pnum_begin,
                  unsigned long pnum_end,
                  unsigned long map_count, int nodeid)
 {
-    sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
+    struct page **map = (struct page **)map_map;
+    sparse_mem_maps_populate_node(map, pnum_begin, pnum_end,
                      map_count, nodeid);
 }

======> maybe you can have:

struct alloc_info {
   struct page **map_map;
   unsigned long pnum_begin;
   unsigned long pnum_end;
   unsigned long map_count;
   int nodeid;
};

static void __init sparse_early_mem_maps_alloc_node(void *data)
 {
    struct alloc_info *info = (struct alloc_info *)data;

    sparse_mem_maps_populate_node(info->map_map, info->pnum_begin,
                      info->pnum_end, info->map_count, info->nodeid);
 }

==============
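
With a single void *data parameter as sketched above, the caller in
alloc_usemap_and_memmap() could then build the argument block on the stack,
roughly like this (a sketch under that assumption, not part of the posted
patch):

    struct alloc_info info = {
        .map_map    = (struct page **)map,
        .pnum_begin = pnum_begin,
        .pnum_end   = pnum,
        .map_count  = map_count,
        .nodeid     = nodeid_begin,
    };

    sparse_early_maps_alloc_node(&info);    /* generic void (*)(void *) call */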

 #else
-
-static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
-                unsigned long pnum_begin,
-                unsigned long pnum_end,
-                unsigned long map_count, int nodeid)
-{
-}
-
 static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 {
     struct page *map;
@@ -471,9 +466,10 @@ void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
 /**
  *  alloc_usemap_and_memmap - memory alloction for pageblock flags and vmemmap
  *  @map: usemap_map for pageblock flags or mmap_map for vmemmap
- *  @use_map: true if memory allocated for pageblock flags, otherwise false
  */
-static void alloc_usemap_and_memmap(unsigned long **map, bool use_map)
+static void alloc_usemap_and_memmap(void (*sparse_early_maps_alloc_node)
+                (void **, unsigned long, unsigned long,
+                unsigned long, int), void **map)
 {
     unsigned long pnum;
     unsigned long map_count;
@@ -504,24 +500,16 @@ static void alloc_usemap_and_memmap(unsigned long **map, bool use_map)
             continue;
         }
         /* ok, we need to take cake of from pnum_begin to pnum - 1*/
-        if (use_map)
-            sparse_early_usemaps_alloc_node(map, pnum_begin, pnum,
-                         map_count, nodeid_begin);
-        else
-            sparse_early_mem_maps_alloc_node((struct page **)map,
-                pnum_begin, pnum, map_count, nodeid_begin);
+        (*sparse_early_maps_alloc_node)(map, pnum_begin, pnum,
+                    map_count, nodeid_begin);


========> can you use sparse_early_maps_alloc_node() ?
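
(The explicit (*...) dereference is not required when calling through a
function pointer, so the plainer form should be equivalent:)

        sparse_early_maps_alloc_node(map, pnum_begin, pnum,
                    map_count, nodeid_begin);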

         /* new start, update count etc*/
         nodeid_begin = nodeid;
         pnum_begin = pnum;
         map_count = 1;
     }
     /* ok, last chunk */
-    if (use_map)
-        sparse_early_usemaps_alloc_node(map, pnum_begin,
-                NR_MEM_SECTIONS, map_count, nodeid_begin);
-    else
-        sparse_early_mem_maps_alloc_node((struct page **)map,
-            pnum_begin, NR_MEM_SECTIONS, map_count, nodeid_begin);
+    (*sparse_early_maps_alloc_node)(map, pnum_begin, NR_MEM_SECTIONS,
+                    map_count, nodeid_begin);
 }

 /*
@@ -540,6 +528,10 @@ void __init sparse_init(void)
     struct page **map_map;
 #endif

+    void (*sparse_early_maps_alloc_node)(void **map,
+            unsigned long pnum_begin, unsigned long pnum_end,
+                unsigned long map_count, int nodeid);
+
     /* see include/linux/mmzone.h 'struct mem_section' definition */
     BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

@@ -561,14 +553,18 @@ void __init sparse_init(void)
     usemap_map = alloc_bootmem(size);
     if (!usemap_map)
         panic("can not allocate usemap_map\n");
-    alloc_usemap_and_memmap(usemap_map, true);
+    sparse_early_maps_alloc_node = sparse_early_usemaps_alloc_node;

======> why do you need to assign the function to a local variable first?
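
(Presumably the function could simply be passed directly, without the local
variable, e.g.:)

    alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
                            (void **)usemap_map);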

+    alloc_usemap_and_memmap(sparse_early_maps_alloc_node,
+                        (void **)usemap_map);

 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
     size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
     map_map = alloc_bootmem(size2);
     if (!map_map)
         panic("can not allocate map_map\n");
-    alloc_usemap_and_memmap((unsigned long **)map_map, false);
+    sparse_early_maps_alloc_node = sparse_early_mem_maps_alloc_node;
+    alloc_usemap_and_memmap(sparse_early_maps_alloc_node,
+                        (void **)map_map);
 #endif

     for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
-- 
1.7.7.6