Message-Id: <20200527182916.249910-1-shakeelb@google.com>
Date: Wed, 27 May 2020 11:29:14 -0700
From: Shakeel Butt <shakeelb@...gle.com>
To: Mel Gorman <mgorman@...e.de>, Johannes Weiner <hannes@...xchg.org>,
Roman Gushchin <guro@...com>, Michal Hocko <mhocko@...nel.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Minchan Kim <minchan@...nel.org>,
Rik van Riel <riel@...riel.com>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, Shakeel Butt <shakeelb@...gle.com>
Subject: [PATCH resend 1/3] mm: swap: fix vmstats for huge pages

Many of the callbacks called by pagevec_lru_move_fn() do not correctly
update the vmstats for huge pages. Fix that. Also make
__pagevec_lru_add_fn() use the irq-unsafe alternative to update the
stats, as irqs are already disabled there.

Signed-off-by: Shakeel Butt <shakeelb@...gle.com>
Acked-by: Johannes Weiner <hannes@...xchg.org>
---
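
Note for reviewers (not part of the commit message): the undercounting
happens because each callback bumps the stats by one per struct page,
while a THP covers HPAGE_PMD_NR base pages. For reference, a rough
sketch of hpage_nr_pages() as defined in include/linux/huge_mm.h in
this tree:

	/* number of base pages a (possibly huge) page spans */
	static inline int hpage_nr_pages(struct page *page)
	{
		if (unlikely(PageTransHuge(page)))
			return HPAGE_PMD_NR; /* 512 with 2MB THPs on x86-64 */
		return 1;
	}

So e.g. (*pgmoved)++ in pagevec_move_tail_fn() credits one page where
512 were actually moved.
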
mm/swap.c | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
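
Also for reference on the irq-unsafe stat update: count_vm_event(s)()
uses this_cpu_add() and is safe against irqs, while __count_vm_events()
uses raw_cpu_add() and relies on the caller for protection. Since
__pagevec_lru_add_fn() runs under the lru_lock taken with
spin_lock_irqsave() in pagevec_lru_move_fn(), the cheaper irq-unsafe
variant is sufficient. A sketch of the two helpers, roughly as in
include/linux/vmstat.h:

	/* irq-safe: this_cpu_add() provides its own protection */
	static inline void count_vm_events(enum vm_event_item item, long delta)
	{
		this_cpu_add(vm_event_states.event[item], delta);
	}

	/* irq-unsafe: caller must have preemption/irqs disabled */
	static inline void __count_vm_events(enum vm_event_item item, long delta)
	{
		raw_cpu_add(vm_event_states.event[item], delta);
	}
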
diff --git a/mm/swap.c b/mm/swap.c
index a37bd7b202ac..3dbef6517cac 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -225,7 +225,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
 		del_page_from_lru_list(page, lruvec, page_lru(page));
 		ClearPageActive(page);
 		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
-		(*pgmoved)++;
+		(*pgmoved) += hpage_nr_pages(page);
 	}
 }
 
@@ -285,7 +285,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
 		add_page_to_lru_list(page, lruvec, lru);
 		trace_mm_lru_activate(page);
 
-		__count_vm_event(PGACTIVATE);
+		__count_vm_events(PGACTIVATE, hpage_nr_pages(page));
 		update_page_reclaim_stat(lruvec, file, 1);
 	}
 }
@@ -503,6 +503,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 {
 	int lru, file;
 	bool active;
+	int nr_pages = hpage_nr_pages(page);
 
 	if (!PageLRU(page))
 		return;
@@ -536,11 +537,11 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 		 * We moves tha page into tail of inactive.
 		 */
 		add_page_to_lru_list_tail(page, lruvec, lru);
-		__count_vm_event(PGROTATED);
+		__count_vm_events(PGROTATED, nr_pages);
 	}
 
 	if (active)
-		__count_vm_event(PGDEACTIVATE);
+		__count_vm_events(PGDEACTIVATE, nr_pages);
 	update_page_reclaim_stat(lruvec, file, 0);
 }
 
@@ -929,6 +930,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 {
 	enum lru_list lru;
 	int was_unevictable = TestClearPageUnevictable(page);
+	int nr_pages = hpage_nr_pages(page);
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 
@@ -966,13 +968,13 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 		update_page_reclaim_stat(lruvec, page_is_file_lru(page),
 					 PageActive(page));
 		if (was_unevictable)
-			count_vm_event(UNEVICTABLE_PGRESCUED);
+			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
 	} else {
 		lru = LRU_UNEVICTABLE;
 		ClearPageActive(page);
 		SetPageUnevictable(page);
 		if (!was_unevictable)
-			count_vm_event(UNEVICTABLE_PGCULLED);
+			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
 	}
 
 	add_page_to_lru_list(page, lruvec, lru);
--
2.27.0.rc0.183.gde8f92d652-goog