Index: linux-2.6-git/kernel/sysctl.c
===================================================================
--- linux-2.6-git.orig/kernel/sysctl.c	2010-02-05 16:48:58.000000000 +0100
+++ linux-2.6-git/kernel/sysctl.c	2010-02-05 16:49:03.000000000 +0100
@@ -262,6 +262,8 @@
 extern unsigned long perf_count_pages_direct_reclaim;
 extern unsigned long perf_count_failed_pages_direct_reclaim;
 extern unsigned long perf_count_failed_pages_direct_reclaim_but_progress;
+extern unsigned long perf_count_call_congestion_wait_from_alloc_pages_high_priority;
+extern unsigned long perf_count_call_congestion_wait_from_alloc_pages_slowpath;
 static struct ctl_table perf_table[] = {
 	{
 		.ctl_name	= CTL_UNNUMBERED,
@@ -270,6 +272,22 @@
 		.mode		= 0666,
 		.maxlen		= sizeof(unsigned long),
 		.proc_handler	= &proc_doulongvec_minmax,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "perf_count_call_congestion_wait_from_alloc_pages_high_priority",
+		.data		= &perf_count_call_congestion_wait_from_alloc_pages_high_priority,
+		.mode		= 0666,
+		.maxlen		= sizeof(unsigned long),
+		.proc_handler	= &proc_doulongvec_minmax,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "perf_count_call_congestion_wait_from_alloc_pages_slowpath",
+		.data		= &perf_count_call_congestion_wait_from_alloc_pages_slowpath,
+		.mode		= 0666,
+		.maxlen		= sizeof(unsigned long),
+		.proc_handler	= &proc_doulongvec_minmax,
 	},
 	{
 		.ctl_name	= CTL_UNNUMBERED,
Index: linux-2.6-git/mm/page_alloc.c
===================================================================
--- linux-2.6-git.orig/mm/page_alloc.c	2010-02-05 16:48:58.000000000 +0100
+++ linux-2.6-git/mm/page_alloc.c	2010-02-05 16:49:03.000000000 +0100
@@ -1663,6 +1663,7 @@
 
 unsigned long perf_count_pages_direct_reclaim = 0;
 unsigned long perf_count_failed_pages_direct_reclaim = 0;
+unsigned long perf_count_failed_pages_direct_reclaim_but_progress = 0;
 
 /* The really slow allocator path where we enter direct reclaim */
 static inline struct page *
@@ -1704,12 +1705,13 @@
 	perf_count_pages_direct_reclaim++;
 	if (!page)
 		perf_count_failed_pages_direct_reclaim++;
-	if (!page && !(*did_some_progress))
-		perf_count_failed_pages_direct_reclaim_but_progress++
+	if (!page && (*did_some_progress))
+		perf_count_failed_pages_direct_reclaim_but_progress++;
 
 	return page;
 }
 
+unsigned long perf_count_call_congestion_wait_from_alloc_pages_high_priority = 0;
 /*
  * This is called in the allocator slow-path if the allocation request is of
  * sufficient urgency to ignore watermarks and take other desperate measures
@@ -1727,8 +1729,10 @@
 			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
 			preferred_zone, migratetype);
 
-		if (!page && gfp_mask & __GFP_NOFAIL)
+		if (!page && gfp_mask & __GFP_NOFAIL) {
+			perf_count_call_congestion_wait_from_alloc_pages_high_priority++;
 			congestion_wait(BLK_RW_ASYNC, HZ/50);
+		}
 	} while (!page && (gfp_mask & __GFP_NOFAIL));
 
 	return page;
@@ -1783,6 +1787,7 @@
 	return alloc_flags;
 }
 
+unsigned long perf_count_call_congestion_wait_from_alloc_pages_slowpath = 0;
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
@@ -1899,6 +1904,7 @@
 		pages_reclaimed += did_some_progress;
 		if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
 			/* Wait for some write requests to complete then retry */
+			perf_count_call_congestion_wait_from_alloc_pages_slowpath++;
 			congestion_wait(BLK_RW_ASYNC, HZ/50);
 			goto rebalance;
 		}