Message-ID: <20121106195428.6941.92699.stgit@srivatsabhat.in.ibm.com>
Date: Wed, 07 Nov 2012 01:24:32 +0530
From: "Srivatsa S. Bhat" <srivatsa.bhat@...ux.vnet.ibm.com>
To: akpm@...ux-foundation.org, mgorman@...e.de, mjg59@...f.ucam.org,
paulmck@...ux.vnet.ibm.com, dave@...ux.vnet.ibm.com,
maxime.coquelin@...ricsson.com, loic.pallardy@...ricsson.com,
arjan@...ux.intel.com, kmpark@...radead.org,
kamezawa.hiroyu@...fujitsu.com, lenb@...nel.org, rjw@...k.pl
Cc: gargankita@...il.com, amit.kachhap@...aro.org,
svaidy@...ux.vnet.ibm.com, thomas.abraham@...aro.org,
santosh.shilimkar@...com, srivatsa.bhat@...ux.vnet.ibm.com,
linux-pm@...r.kernel.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [RFC PATCH 8/8] mm: Print memory region statistics to understand the
 buddy allocator behavior
In order to observe the behavior of the region-aware buddy allocator, modify
vmstat.c to also print memory region related statistics. In particular, enable
memory region related info in /proc/zoneinfo and /proc/buddyinfo, since they
help us to at least (roughly) see how the new buddy allocator is performing.
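For example, with a zone split into two memory regions and the default
MAX_ORDER of 11, the new /proc/buddyinfo layout produced by frag_show_print()
would look roughly like the following (the free-block counts are made up,
purely to illustrate the layout):

  Node 0, zone   Normal
                 Region 0     10      7      5      3      2      1      1      0      0      0      1
                 Region 1     64     32     16      8      4      2      1      1      0      0      2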
For now, the region statistics correspond to the zone memory regions and not
the (absolute) node memory regions, and some of the statistics (especially the
number of present pages) might not be very accurate. But since we account for
and print the free page statistics for every zone memory region accurately, we
should be able to observe the new page allocator's behavior to a reasonable
degree of accuracy.
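Similarly, each zone's entry in /proc/zoneinfo would gain per-region blocks
roughly along these lines (again, the numbers are only illustrative, chosen to
match the made-up buddyinfo example above):

  Zone mem region 0
   pages spanned 65536
   present 65011
   free 1220

  Zone mem region 1
   pages spanned 65536
   present 65280
   free 2624

  Zone pages nr_free 3844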
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@...ux.vnet.ibm.com>
---
mm/vmstat.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 53 insertions(+), 4 deletions(-)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 8183331..cbcd373 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -812,11 +812,31 @@ const char * const vmstat_text[] = {
 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
 						struct zone *zone)
 {
-	int order;
+	int i, order, t;
+	struct free_area *area;
 
-	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
-	for (order = 0; order < MAX_ORDER; ++order)
-		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
+	seq_printf(m, "Node %d, zone %8s \n", pgdat->node_id, zone->name);
+
+	for (i = 0; i < zone->nr_zone_regions; i++) {
+
+		seq_printf(m, "\t\t Region %d ", i);
+
+		for (order = 0; order < MAX_ORDER; ++order) {
+			unsigned long nr_free = 0;
+
+			area = &zone->free_area[order];
+
+			for (t = 0; t < MIGRATE_TYPES; t++) {
+				if (t == MIGRATE_ISOLATE ||
+						t == MIGRATE_RESERVE)
+					continue;
+				nr_free +=
+					area->free_list[t].mr_list[i].nr_free;
+			}
+			seq_printf(m, "%6lu ", nr_free);
+		}
+		seq_putc(m, '\n');
+	}
 	seq_putc(m, '\n');
 }
@@ -984,6 +1004,8 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 						struct zone *zone)
 {
 	int i;
+	unsigned long zone_nr_free = 0;
+
 	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
 	seq_printf(m,
 		   "\n  pages free     %lu"
@@ -1001,6 +1023,33 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 		   zone->spanned_pages,
 		   zone->present_pages);
 
+	for (i = 0; i < zone->nr_zone_regions; i++) {
+		int order, t;
+		unsigned long nr_free = 0;
+		struct free_area *area = zone->free_area;
+
+		for_each_migratetype_order(order, t) {
+			if (t == MIGRATE_ISOLATE || t == MIGRATE_RESERVE)
+				continue;
+			nr_free +=
+				area[order].free_list[t].mr_list[i].nr_free
+						* (1UL << order);
+		}
+		seq_printf(m, "\n\nZone mem region %d", i);
+		seq_printf(m,
+			   "\n pages spanned %lu"
+			   "\n present %lu"
+			   "\n free %lu",
+			   zone->zone_mem_region[i].spanned_pages,
+			   zone->zone_mem_region[i].present_pages,
+			   nr_free);
+	}
+
+	for (i = 0; i < MAX_ORDER; i++)
+		zone_nr_free += zone->free_area[i].nr_free * (1UL << i);
+
+	seq_printf(m, "\nZone pages nr_free %lu\n", zone_nr_free);
+
 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
 			   zone_page_state(zone, i));
--