Message-Id: <1617140178-8773-28-git-send-email-anthony.yznaga@oracle.com>
Date: Tue, 30 Mar 2021 14:36:02 -0700
From: Anthony Yznaga <anthony.yznaga@...cle.com>
To: linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc: willy@...radead.org, corbet@....net, tglx@...utronix.de,
mingo@...hat.com, bp@...en8.de, x86@...nel.org, hpa@...or.com,
dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
rppt@...nel.org, akpm@...ux-foundation.org, hughd@...gle.com,
ebiederm@...ssion.com, keescook@...omium.org, ardb@...nel.org,
nivedita@...m.mit.edu, jroedel@...e.de, masahiroy@...nel.org,
nathan@...nel.org, terrelln@...com, vincenzo.frascino@....com,
martin.b.radev@...il.com, andreyknvl@...gle.com,
daniel.kiper@...cle.com, rafael.j.wysocki@...el.com,
dan.j.williams@...el.com, Jonathan.Cameron@...wei.com,
bhe@...hat.com, rminnich@...il.com, ashish.kalra@....com,
guro@...com, hannes@...xchg.org, mhocko@...nel.org,
iamjoonsoo.kim@....com, vbabka@...e.cz, alex.shi@...ux.alibaba.com,
david@...hat.com, richard.weiyang@...il.com,
vdavydov.dev@...il.com, graf@...zon.com, jason.zeng@...el.com,
lei.l.li@...el.com, daniel.m.jordan@...cle.com,
steven.sistare@...cle.com, linux-fsdevel@...r.kernel.org,
linux-doc@...r.kernel.org, kexec@...ts.infradead.org
Subject: [RFC v2 27/43] mm: shmem: when inserting, handle pages already charged to a memcg
If shmem_insert_page() is called to insert a page that was preserved
using PKRAM on the current boot (i.e. the preserved page is restored
without an intervening kexec boot), the page will still be charged to a
memory cgroup because it was never freed. Don't try to charge it again.
Signed-off-by: Anthony Yznaga <anthony.yznaga@...cle.com>
---
mm/shmem.c | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 8dfe80aeee97..44cc158ab34d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -671,7 +671,7 @@ static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
 static int shmem_add_to_page_cache(struct page *page,
 				   struct address_space *mapping,
 				   pgoff_t index, void *expected, gfp_t gfp,
-				   struct mm_struct *charge_mm)
+				   struct mm_struct *charge_mm, bool skipcharge)
 {
 	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
 	unsigned long i = 0;
@@ -688,7 +688,7 @@ static int shmem_add_to_page_cache(struct page *page,
 	page->mapping = mapping;
 	page->index = index;
 
-	if (!PageSwapCache(page)) {
+	if (!skipcharge && !PageSwapCache(page)) {
 		error = mem_cgroup_charge(page, charge_mm, gfp);
 		if (error) {
 			if (PageTransHuge(page)) {
@@ -770,6 +770,7 @@ int shmem_insert_page(struct mm_struct *mm, struct inode *inode, pgoff_t index,
 	int nr;
 	pgoff_t hindex = index;
 	bool on_lru = PageLRU(page);
+	bool ischarged = page_memcg(page) ? true : false;
 
 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
 		return -EFBIG;
@@ -809,7 +810,8 @@ int shmem_insert_page(struct mm_struct *mm, struct inode *inode, pgoff_t index,
 	__SetPageReferenced(page);
 
 	err = shmem_add_to_page_cache(page, mapping, hindex,
-				      NULL, gfp & GFP_RECLAIM_MASK, mm);
+				      NULL, gfp & GFP_RECLAIM_MASK,
+				      mm, ischarged);
 	if (err)
 		goto out_unlock;
@@ -1829,7 +1831,7 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
 	error = shmem_add_to_page_cache(page, mapping, index,
 					swp_to_radix_entry(swap), gfp,
-					charge_mm);
+					charge_mm, false);
 	if (error)
 		goto failed;
@@ -2009,7 +2011,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		error = shmem_add_to_page_cache(page, mapping, hindex,
 						NULL, gfp & GFP_RECLAIM_MASK,
-						charge_mm);
+						charge_mm, false);
 		if (error)
 			goto unacct;
 		lru_cache_add(page);
@@ -2500,7 +2502,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 		goto out_release;
 
 	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
-				      gfp & GFP_RECLAIM_MASK, dst_mm);
+				      gfp & GFP_RECLAIM_MASK, dst_mm, false);
 	if (ret)
 		goto out_release;
--
1.8.3.1