Message-Id: <1469110261-7365-3-git-send-email-mgorman@techsingularity.net>
Date: Thu, 21 Jul 2016 15:10:58 +0100
From: Mel Gorman <mgorman@...hsingularity.net>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Johannes Weiner <hannes@...xchg.org>,
Minchan Kim <minchan@...nel.org>,
Michal Hocko <mhocko@...e.cz>,
Vlastimil Babka <vbabka@...e.cz>,
Linux-MM <linux-mm@...ck.org>,
LKML <linux-kernel@...r.kernel.org>,
Mel Gorman <mgorman@...hsingularity.net>
Subject: [PATCH 2/5] mm, vmscan: Remove highmem_file_pages

With the reintroduction of per-zone LRU stats, highmem_file_pages is
redundant, so remove it.

Signed-off-by: Mel Gorman <mgorman@...hsingularity.net>
---
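
For reference (not part of the changelog): the per-zone LRU counters
updated from __update_lru_size() already carry this information, so the
highmem file page total can be computed on demand from zone_page_state()
instead of being maintained in the atomic. Below is a rough sketch of that
calculation, assuming CONFIG_HIGHMEM and using a hypothetical helper name;
the zone walk mirrors the one highmem_dirtyable_memory() performs in the
second hunk:

	/*
	 * Illustration only, not part of this patch: sum file LRU pages
	 * across highmem zones using the per-zone counters kept up to
	 * date by __update_lru_size(), replacing the removed
	 * highmem_file_pages atomic.
	 */
	static unsigned long highmem_file_pages_snapshot(void)
	{
		unsigned long pages = 0;
		int node, i;

		for_each_node_state(node, N_HIGH_MEMORY) {
			for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
				struct zone *z = &NODE_DATA(node)->node_zones[i];

				if (!is_highmem_idx(i) || !populated_zone(z))
					continue;

				pages += zone_page_state(z, NR_INACTIVE_FILE);
				pages += zone_page_state(z, NR_ACTIVE_FILE);
			}
		}

		return pages;
	}
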
include/linux/mm_inline.h | 17 -----------------
mm/page-writeback.c | 12 ++++--------
2 files changed, 4 insertions(+), 25 deletions(-)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 9cc130f5feb2..71613e8a720f 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -4,22 +4,6 @@
#include <linux/huge_mm.h>
#include <linux/swap.h>
-#ifdef CONFIG_HIGHMEM
-extern atomic_t highmem_file_pages;
-
-static inline void acct_highmem_file_pages(int zid, enum lru_list lru,
- int nr_pages)
-{
- if (is_highmem_idx(zid) && is_file_lru(lru))
- atomic_add(nr_pages, &highmem_file_pages);
-}
-#else
-static inline void acct_highmem_file_pages(int zid, enum lru_list lru,
- int nr_pages)
-{
-}
-#endif
-
/**
* page_is_file_cache - should the page be on a file LRU or anon LRU?
* @page: the page to test
@@ -47,7 +31,6 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec,
__mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages);
__mod_zone_page_state(&pgdat->node_zones[zid],
NR_ZONE_LRU_BASE + lru, nr_pages);
- acct_highmem_file_pages(zid, lru, nr_pages);
}
static __always_inline void update_lru_size(struct lruvec *lruvec,
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 573d138fa7a5..cfa78124c3c2 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -299,17 +299,13 @@ static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
return nr_pages;
}
-#ifdef CONFIG_HIGHMEM
-atomic_t highmem_file_pages;
-#endif
static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
int node;
- unsigned long x;
+ unsigned long x = 0;
int i;
- unsigned long dirtyable = 0;
for_each_node_state(node, N_HIGH_MEMORY) {
for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
@@ -326,12 +322,12 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
nr_pages = zone_page_state(z, NR_FREE_PAGES);
/* watch for underflows */
nr_pages -= min(nr_pages, high_wmark_pages(z));
- dirtyable += nr_pages;
+ nr_pages += zone_page_state(z, NR_INACTIVE_FILE);
+ nr_pages += zone_page_state(z, NR_ACTIVE_FILE);
+ x += nr_pages;
}
}
- x = dirtyable + atomic_read(&highmem_file_pages);
-
/*
* Unreclaimable memory (kernel memory or anonymous memory
* without swap) can bring down the dirtyable pages below
--
2.6.4