Message-Id: <20221206171340.139790-3-hannes@cmpxchg.org>
Date: Tue, 6 Dec 2022 18:13:40 +0100
From: Johannes Weiner <hannes@...xchg.org>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>,
Hugh Dickins <hughd@...gle.com>,
Shakeel Butt <shakeelb@...gle.com>,
Michal Hocko <mhocko@...e.com>, linux-mm@...ck.org,
cgroups@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH 2/3] mm: rmap: remove lock_page_memcg()
The previous patch made sure charge moving only touches pages for
which page_mapped() is stable. lock_page_memcg() is no longer needed.
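For illustration only (not part of the patch), a heavily abridged sketch of the
pattern being removed, using page_add_file_rmap() as the example: the mapcount
and stat updates used to be bracketed by lock_page_memcg()/unlock_page_memcg()
so that charge moving would observe a stable page_mapped(); with the previous
patch, charge moving never looks at such pages while they can change, so the
bracket can simply be dropped. Function bodies are elided and marked as such.

	/* Before (abridged sketch): stat update under the memcg page lock. */
	void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
				bool compound)
	{
		int nr = 0;

		lock_page_memcg(page);
		/* ... bump _mapcount / compound mapcounts, compute nr ... */
		if (nr)
			__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
		unlock_page_memcg(page);

		mlock_vma_page(page, vma, compound);
	}

	/* After (abridged sketch): no bracket needed; charge moving no longer
	 * relies on page_mapped() being frozen here.
	 */
	void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
				bool compound)
	{
		int nr = 0;

		/* ... bump _mapcount / compound mapcounts, compute nr ... */
		if (nr)
			__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);

		mlock_vma_page(page, vma, compound);
	}

The anon and remove paths in the diff below follow the same before/after shape.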
Signed-off-by: Johannes Weiner <hannes@...xchg.org>
---
mm/rmap.c | 26 ++++++++------------------
1 file changed, 8 insertions(+), 18 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index b616870a09be..32e48b1c5847 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1222,9 +1222,6 @@ void page_add_anon_rmap(struct page *page,
bool compound = flags & RMAP_COMPOUND;
bool first = true;
- if (unlikely(PageKsm(page)))
- lock_page_memcg(page);
-
/* Is page being mapped by PTE? Is this its first map to be added? */
if (likely(!compound)) {
first = atomic_inc_and_test(&page->_mapcount);
@@ -1262,15 +1259,14 @@ void page_add_anon_rmap(struct page *page,
if (nr)
__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
- if (unlikely(PageKsm(page)))
- unlock_page_memcg(page);
-
- /* address might be in next vma when migration races vma_adjust */
- else if (first)
- __page_set_anon_rmap(page, vma, address,
- !!(flags & RMAP_EXCLUSIVE));
- else
- __page_check_anon_rmap(page, vma, address);
+ if (likely(!PageKsm(page))) {
+ /* address might be in next vma when migration races vma_adjust */
+ if (first)
+ __page_set_anon_rmap(page, vma, address,
+ !!(flags & RMAP_EXCLUSIVE));
+ else
+ __page_check_anon_rmap(page, vma, address);
+ }
mlock_vma_page(page, vma, compound);
}
@@ -1329,7 +1325,6 @@ void page_add_file_rmap(struct page *page,
bool first;
VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
- lock_page_memcg(page);
/* Is page being mapped by PTE? Is this its first map to be added? */
if (likely(!compound)) {
@@ -1365,7 +1360,6 @@ void page_add_file_rmap(struct page *page,
NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
if (nr)
__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
- unlock_page_memcg(page);
mlock_vma_page(page, vma, compound);
}
@@ -1394,8 +1388,6 @@ void page_remove_rmap(struct page *page,
return;
}
- lock_page_memcg(page);
-
/* Is page being unmapped by PTE? Is this its last map to be removed? */
if (likely(!compound)) {
last = atomic_add_negative(-1, &page->_mapcount);
@@ -1451,8 +1443,6 @@ void page_remove_rmap(struct page *page,
* and remember that it's only reliable while mapped.
*/
- unlock_page_memcg(page);
-
munlock_vma_page(page, vma, compound);
}
--
2.38.1