Message-ID: <20230415092716.61970-1-wangkefeng.wang@huawei.com>
Date: Sat, 15 Apr 2023 17:27:16 +0800
From: Kefeng Wang <wangkefeng.wang@...wei.com>
To: Andrew Morton <akpm@...ux-foundation.org>, <linux-mm@...ck.org>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>
CC: SeongJae Park <sj@...nel.org>, Hugh Dickins <hughd@...gle.com>,
<linux-kernel@...r.kernel.org>,
Kefeng Wang <wangkefeng.wang@...wei.com>
Subject: [PATCH] mm: rename reclaim_pages() to reclaim_folios()

Commit a83f0551f496 ("mm/vmscan: convert reclaim_pages() to use
a folio") changed the parameter from page_list to folio_list in the
function definition, but not in the declaration. Fix the declaration
to match, and rename reclaim_pages() to reclaim_folios() since the
function now operates on a list of folios.

Signed-off-by: Kefeng Wang <wangkefeng.wang@...wei.com>
---
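For reviewers who want the calling convention at a glance, below is a
minimal sketch of the pattern the converted callers follow, modelled on
damon_pa_pageout(). demo_pageout() is a hypothetical helper written for
illustration only, and the sketch assumes an mm/-internal context,
since folio_isolate_lru() is declared in mm/internal.h:

#include <linux/swap.h>	/* reclaim_folios() after this patch */
#include "internal.h"	/* folio_isolate_lru(), mm/-internal */

/* Illustrative only, not part of this patch. */
static unsigned long demo_pageout(struct folio *folio)
{
	LIST_HEAD(folio_list);

	/* Take the folio off its LRU so reclaim can own it. */
	if (!folio_isolate_lru(folio))
		return 0;

	/* Clear "young" markers; reclaim skips recently used folios. */
	folio_clear_referenced(folio);
	folio_test_clear_young(folio);

	list_add(&folio->lru, &folio_list);
	folio_put(folio);	/* drop our extra ref; isolation holds one */

	/* Returns the number of folios actually reclaimed. */
	return reclaim_folios(&folio_list);
}

Internally the function still batches the list per NUMA node via
reclaim_folio_list(), as the last hunk shows; only the exported name
and parameter change.
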
include/linux/swap.h | 2 +-
mm/damon/paddr.c | 2 +-
mm/madvise.c | 4 ++--
mm/shmem.c | 4 ++--
mm/vmscan.c | 2 +-
5 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7f7d5b9ddf7e..8c8c6ceaa462 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -442,7 +442,7 @@ extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);
-extern unsigned long reclaim_pages(struct list_head *page_list);
+unsigned long reclaim_folios(struct list_head *folio_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index dd9c33fbe805..840d25ad9e59 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -255,7 +255,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
list_add(&folio->lru, &folio_list);
folio_put(folio);
}
- applied = reclaim_pages(&folio_list);
+ applied = reclaim_folios(&folio_list);
cond_resched();
return applied * PAGE_SIZE;
}
diff --git a/mm/madvise.c b/mm/madvise.c
index b5ffbaf616f5..bfc683de85ef 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -417,7 +417,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
huge_unlock:
spin_unlock(ptl);
if (pageout)
- reclaim_pages(&folio_list);
+ reclaim_folios(&folio_list);
return 0;
}
@@ -513,7 +513,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(orig_pte, ptl);
if (pageout)
- reclaim_pages(&folio_list);
+ reclaim_folios(&folio_list);
cond_resched();
return 0;
diff --git a/mm/shmem.c b/mm/shmem.c
index 16378b281a5d..bdb2948a149f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2381,7 +2381,7 @@ static void shmem_isolate_pages_range(struct address_space *mapping, loff_t start
folio_put(folio);
/*
- * Prepare the folios to be passed to reclaim_pages().
+ * Prepare the folios to be passed to reclaim_folios().
* VM can't reclaim a folio unless young bit is
* cleared in its flags.
*/
@@ -2406,7 +2406,7 @@ static int shmem_fadvise_dontneed(struct address_space *mapping, loff_t start,
lru_add_drain();
shmem_isolate_pages_range(mapping, start, end, &folio_list);
- reclaim_pages(&folio_list);
+ reclaim_folios(&folio_list);
return 0;
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2cd21e1d5849..b218c8a6244f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2786,7 +2786,7 @@ static unsigned int reclaim_folio_list(struct list_head *folio_list,
return nr_reclaimed;
}
-unsigned long reclaim_pages(struct list_head *folio_list)
+unsigned long reclaim_folios(struct list_head *folio_list)
{
int nid;
unsigned int nr_reclaimed = 0;
--
2.35.3