[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <28c262360907152122v4f30594cr677b744df3aaefcc@mail.gmail.com>
Date: Thu, 16 Jul 2009 13:22:32 +0900
From: Minchan Kim <minchan.kim@...il.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@...fujitsu.com>,
LKML <linux-kernel@...r.kernel.org>,
linux-mm <linux-mm@...ck.org>,
Wu Fengguang <fengguang.wu@...el.com>,
Rik van Riel <riel@...hat.com>,
Christoph Lameter <cl@...ux-foundation.org>
Subject: Re: [PATCH 3/3] add isolate pages vmstat
On Thu, Jul 16, 2009 at 12:16 PM, Andrew
Morton<akpm@...ux-foundation.org> wrote:
> On Thu, 16 Jul 2009 09:55:47 +0900 (JST) KOSAKI Motohiro <kosaki.motohiro@...fujitsu.com> wrote:
>
>> ChangeLog
>> Since v5
>> - Rewrote the description
>> - Treat page migration
>> Since v4
>> - Changed displaying order in show_free_areas() (as Wu suggested)
>> Since v3
>> - Fixed page misaccounting bug when lumpy reclaim occurs
>> Since v2
>> - Separated IsolateLRU field to Isolated(anon) and Isolated(file)
>> Since v1
>> - Renamed IsolatePages to IsolatedLRU
>>
>> ==================================
>> Subject: [PATCH] add isolate pages vmstat
>>
>> If the system is running a heavy load of processes then concurrent reclaim
>> can isolate a large number of pages from the LRU. /proc/meminfo and the
>> output generated for an OOM do not show how many pages were isolated.
>>
>> This patch shows the information about isolated pages.
>>
>>
>> reproduce way
>> -----------------------
>> % ./hackbench 140 process 1000
>> => OOM occur
>>
>> active_anon:146 inactive_anon:0 isolated_anon:49245
>> active_file:79 inactive_file:18 isolated_file:113
>> unevictable:0 dirty:0 writeback:0 unstable:0 buffer:39
>> free:370 slab_reclaimable:309 slab_unreclaimable:5492
>> mapped:53 shmem:15 pagetables:28140 bounce:0
>>
>>
>> Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@...fujitsu.com>
>> Acked-by: Rik van Riel <riel@...hat.com>
>> Acked-by: Wu Fengguang <fengguang.wu@...el.com>
>> Reviewed-by: Minchan Kim <minchan.kim@...il.com>
>> ---
>> drivers/base/node.c | 4 ++++
>> fs/proc/meminfo.c | 4 ++++
>> include/linux/mmzone.h | 2 ++
>> mm/migrate.c | 11 +++++++++++
>> mm/page_alloc.c | 12 +++++++++---
>> mm/vmscan.c | 12 +++++++++++-
>> mm/vmstat.c | 2 ++
>> 7 files changed, 43 insertions(+), 4 deletions(-)
>>
>> Index: b/fs/proc/meminfo.c
>> ===================================================================
>> --- a/fs/proc/meminfo.c
>> +++ b/fs/proc/meminfo.c
>> @@ -65,6 +65,8 @@ static int meminfo_proc_show(struct seq_
>> "Active(file): %8lu kB\n"
>> "Inactive(file): %8lu kB\n"
>> "Unevictable: %8lu kB\n"
>> + "Isolated(anon): %8lu kB\n"
>> + "Isolated(file): %8lu kB\n"
>> "Mlocked: %8lu kB\n"
>
> Are these counters really important enough to justify being present in
> /proc/meminfo? They seem fairly low-level developer-only details.
> Perhaps relegate them to /proc/vmstat?
>
>> #ifdef CONFIG_HIGHMEM
>> "HighTotal: %8lu kB\n"
>> @@ -110,6 +112,8 @@ static int meminfo_proc_show(struct seq_
>> K(pages[LRU_ACTIVE_FILE]),
>> K(pages[LRU_INACTIVE_FILE]),
>> K(pages[LRU_UNEVICTABLE]),
>> + K(global_page_state(NR_ISOLATED_ANON)),
>> + K(global_page_state(NR_ISOLATED_FILE)),
>> K(global_page_state(NR_MLOCK)),
>> #ifdef CONFIG_HIGHMEM
>> K(i.totalhigh),
>> Index: b/include/linux/mmzone.h
>> ===================================================================
>> --- a/include/linux/mmzone.h
>> +++ b/include/linux/mmzone.h
>> @@ -100,6 +100,8 @@ enum zone_stat_item {
>> NR_BOUNCE,
>> NR_VMSCAN_WRITE,
>> NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */
>> + NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
>> + NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
>> NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */
>> #ifdef CONFIG_NUMA
>> NUMA_HIT, /* allocated in intended node */
>> Index: b/mm/page_alloc.c
>> ===================================================================
>> --- a/mm/page_alloc.c
>> +++ b/mm/page_alloc.c
>> @@ -2115,16 +2115,18 @@ void show_free_areas(void)
>> }
>> }
>>
>> - printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
>> - " inactive_file:%lu"
>> + printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
>> + " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
>> " unevictable:%lu"
>> " dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n"
>> " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
>> " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
>> global_page_state(NR_ACTIVE_ANON),
>> - global_page_state(NR_ACTIVE_FILE),
>> global_page_state(NR_INACTIVE_ANON),
>> + global_page_state(NR_ISOLATED_ANON),
>> + global_page_state(NR_ACTIVE_FILE),
>> global_page_state(NR_INACTIVE_FILE),
>> + global_page_state(NR_ISOLATED_FILE),
>> global_page_state(NR_UNEVICTABLE),
>> global_page_state(NR_FILE_DIRTY),
>> global_page_state(NR_WRITEBACK),
>> @@ -2152,6 +2154,8 @@ void show_free_areas(void)
>> " active_file:%lukB"
>> " inactive_file:%lukB"
>> " unevictable:%lukB"
>> + " isolated(anon):%lukB"
>> + " isolated(file):%lukB"
>> " present:%lukB"
>> " mlocked:%lukB"
>> " dirty:%lukB"
>> @@ -2178,6 +2182,8 @@ void show_free_areas(void)
>> K(zone_page_state(zone, NR_ACTIVE_FILE)),
>> K(zone_page_state(zone, NR_INACTIVE_FILE)),
>> K(zone_page_state(zone, NR_UNEVICTABLE)),
>> + K(zone_page_state(zone, NR_ISOLATED_ANON)),
>> + K(zone_page_state(zone, NR_ISOLATED_FILE)),
>> K(zone->present_pages),
>> K(zone_page_state(zone, NR_MLOCK)),
>> K(zone_page_state(zone, NR_FILE_DIRTY)),
>> Index: b/mm/vmscan.c
>> ===================================================================
>> --- a/mm/vmscan.c
>> +++ b/mm/vmscan.c
>> @@ -1067,6 +1067,8 @@ static unsigned long shrink_inactive_lis
>> unsigned long nr_active;
>> unsigned int count[NR_LRU_LISTS] = { 0, };
>> int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
>> + unsigned long nr_anon;
>> + unsigned long nr_file;
>>
>> nr_taken = sc->isolate_pages(sc->swap_cluster_max,
>> &page_list, &nr_scan, sc->order, mode,
>> @@ -1097,6 +1099,10 @@ static unsigned long shrink_inactive_lis
>> __mod_zone_page_state(zone, NR_INACTIVE_ANON,
>> -count[LRU_INACTIVE_ANON]);
>>
>> + nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
>> + nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
>> + __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
>> + __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
>>
>> reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
>> reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
>> @@ -1164,6 +1170,9 @@ static unsigned long shrink_inactive_lis
>> spin_lock_irq(&zone->lru_lock);
>> }
>> }
>> + __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
>> + __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
>> +
>> } while (nr_scanned < max_scan);
>
> This is a non-trivial amount of extra stuff. Do we really need it?
>
I thought so.
This patch results from a process fork bomb (e.g., msgctl11 in LTP).
The detection of "too many isolated pages" is based on the isolation counter.
So I think we still need this for now.
If we can solve the problem with a different method, then we can drop this.
--
Kind regards,
Minchan Kim
Powered by blists - more mailing lists