Date:	Thu, 12 Feb 2015 16:32:18 +0900
From:	Joonsoo Kim <iamjoonsoo.kim@....com>
To:	Andrew Morton <akpm@...ux-foundation.org>
Cc:	Rik van Riel <riel@...hat.com>,
	Johannes Weiner <hannes@...xchg.org>,
	Mel Gorman <mgorman@...e.de>,
	Laura Abbott <lauraa@...eaurora.org>,
	Minchan Kim <minchan@...nel.org>,
	Heesub Shin <heesub.shin@...sung.com>,
	Marek Szyprowski <m.szyprowski@...sung.com>,
	Michal Nazarewicz <mina86@...a86.com>,
	"Aneesh Kumar K.V" <aneesh.kumar@...ux.vnet.ibm.com>,
	linux-mm@...ck.org, linux-kernel@...r.kernel.org,
	Hui Zhu <zhuhui@...omi.com>, Gioh Kim <gioh.kim@....com>,
	Bartlomiej Zolnierkiewicz <b.zolnierkie@...sung.com>,
	Ritesh Harjani <ritesh.list@...il.com>,
	Vlastimil Babka <vbabka@...e.cz>,
	Joonsoo Kim <iamjoonsoo.kim@....com>
Subject: [RFC 14/16] mm/cma: print stolen page count

Pages reserved for CMA can reside in different zones. To account for
the memory map correctly, we need a per-zone count of the pages stolen
for CMA.
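
For reference, the accounting amounts to mapping each pageblock's base
pfn to its (node, zone) pair and bumping a counter. A minimal sketch of
that bookkeeping (the helper name account_stolen_pageblock is made up
for illustration; the pfn/zone helpers are the usual mm ones):

	/* sketch: tally pageblocks handed to CMA, per (node, zone) */
	static unsigned long __initdata stealed_pages[MAX_NUMNODES][MAX_NR_ZONES];

	static void __init account_stolen_pageblock(unsigned long base_pfn)
	{
		struct page *page = pfn_to_page(base_pfn);
		int nid = page_to_nid(page);	/* node the pageblock sits on */
		int zone_index = zone_idx(page_zone(page));	/* zone within that node */

		stealed_pages[nid][zone_index] += pageblock_nr_pages;
	}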

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@....com>
---
 mm/cma.c |   28 +++++++++++++++++++++++++++-
 1 file changed, 27 insertions(+), 1 deletion(-)

diff --git a/mm/cma.c b/mm/cma.c
index 267fa14..b165c1a 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -40,6 +40,8 @@ struct cma cma_areas[MAX_CMA_AREAS];
 unsigned cma_area_count;
 static DEFINE_MUTEX(cma_mutex);
 
+static unsigned long __initdata stealed_pages[MAX_NUMNODES][MAX_NR_ZONES];
+
 unsigned long cma_total_pages(unsigned long node_start_pfn,
 				unsigned long node_end_pfn)
 {
@@ -98,6 +100,7 @@ static int __init cma_activate_area(struct cma *cma)
 	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
 	unsigned i = cma->count >> pageblock_order;
 	int nid;
+	int zone_index;
 
 	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
 
@@ -125,6 +128,8 @@ static int __init cma_activate_area(struct cma *cma)
 			if (page_to_nid(pfn_to_page(pfn)) != nid)
 				goto err;
 		}
+		zone_index = zone_idx(page_zone(pfn_to_page(base_pfn)));
+		stealed_pages[nid][zone_index] += pageblock_nr_pages;
 		init_cma_reserved_pageblock(base_pfn);
 	} while (--i);
 
@@ -145,7 +150,9 @@ err:
 
 static int __init cma_init_reserved_areas(void)
 {
-	int i;
+	int i, j;
+	pg_data_t *pgdat;
+	struct zone *zone;
 
 	for (i = 0; i < cma_area_count; i++) {
 		int ret = cma_activate_area(&cma_areas[i]);
@@ -154,6 +161,25 @@ static int __init cma_init_reserved_areas(void)
 			return ret;
 	}
 
+	for (i = 0; i < MAX_NUMNODES; i++) {
+		for (j = 0; j < MAX_NR_ZONES; j++) {
+			if (stealed_pages[i][j])
+				goto print;
+		}
+		continue;
+
+print:
+		pgdat = NODE_DATA(i);
+		for (j = 0; j < MAX_NR_ZONES; j++) {
+			if (!stealed_pages[i][j])
+				continue;
+
+			zone = pgdat->node_zones + j;
+			pr_info("Steal %lu pages from %s\n",
+				stealed_pages[i][j], zone->name);
+		}
+	}
+
 	return 0;
 }
 core_initcall(cma_init_reserved_areas);
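
With the patch applied, the boot log gains one line per (node, zone)
pair that contributed pageblocks, using the pr_info() format above;
for example (counts hypothetical):

	Steal 16384 pages from Normal
	Steal 2048 pages from HighMem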
-- 
1.7.9.5
