Message-Id: <20211218185205.1744125-3-nikita.yushchenko@virtuozzo.com>
Date: Sat, 18 Dec 2021 21:52:05 +0300
From: Nikita Yushchenko <nikita.yushchenko@...tuozzo.com>
To: Will Deacon <will@...nel.org>,
"Aneesh Kumar K.V" <aneesh.kumar@...ux.ibm.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Nick Piggin <npiggin@...il.com>,
Peter Zijlstra <peterz@...radead.org>,
Catalin Marinas <catalin.marinas@....com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Arnd Bergmann <arnd@...db.de>, Sam Ravnborg <sam@...nborg.org>
Cc: x86@...nel.org, linux-kernel@...r.kernel.org,
linux-arch@...r.kernel.org, linux-mm@...ck.org, kernel@...nvz.org
Subject: [PATCH/RFC v2 2/3] mm/swap: introduce free_pages_and_swap_cache_nolru()
This is a variant of free_pages_and_swap_cache() that does not call
lru_add_drain(), for better performance when the passed pages are
guaranteed not to be on the LRU.
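As an illustration only, here is a minimal caller sketch; the function
free_private_batch() and its setup are hypothetical and not part of
this series. A batch-freeing path whose pages were never put on the
LRU can call the new helper and skip the drain:

  #include <linux/mm.h>
  #include <linux/swap.h>

  /*
   * Hypothetical caller: the pages in this batch were never added to
   * the LRU (never passed to lru_cache_add()), so the per-CPU LRU-add
   * caches cannot hold them and an lru_add_drain() round trip would be
   * pure overhead.
   */
  static void free_private_batch(struct page **pages, int nr)
  {
          free_pages_and_swap_cache_nolru(pages, nr);
  }

With CONFIG_SWAP disabled, both helpers fall back to release_pages(),
so callers do not need to distinguish the two configurations.
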
Signed-off-by: Nikita Yushchenko <nikita.yushchenko@...tuozzo.com>
---
 include/linux/swap.h |  5 ++++-
 mm/swap_state.c      | 29 ++++++++++++++++++++++-------
 2 files changed, 26 insertions(+), 8 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index d1ea44b31f19..86a1b0a61889 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -460,6 +460,7 @@ extern void clear_shadow_from_swap_cache(int type, unsigned long begin,
extern void free_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
+extern void free_pages_and_swap_cache_nolru(struct page **, int);
 extern struct page *lookup_swap_cache(swp_entry_t entry,
 				      struct vm_area_struct *vma,
 				      unsigned long addr);
@@ -565,7 +566,9 @@ static inline struct address_space *swap_address_space(swp_entry_t entry)
 #define free_page_and_swap_cache(page) \
 	put_page(page)
 #define free_pages_and_swap_cache(pages, nr) \
-	release_pages((pages), (nr));
+	release_pages((pages), (nr))
+#define free_pages_and_swap_cache_nolru(pages, nr) \
+	release_pages((pages), (nr))
 
 static inline void free_swap_cache(struct page *page)
 {
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 8d4104242100..a5d9fd258f0a 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -307,17 +307,32 @@ void free_page_and_swap_cache(struct page *page)
 
 /*
  * Passed an array of pages, drop them all from swapcache and then release
- * them.  They are removed from the LRU and freed if this is their last use.
+ * them.  They are optionally removed from the LRU and freed if this is their
+ * last use.
  */
-void free_pages_and_swap_cache(struct page **pages, int nr)
+static void __free_pages_and_swap_cache(struct page **pages, int nr,
+					bool do_lru)
 {
-	struct page **pagep = pages;
 	int i;
 
-	lru_add_drain();
-	for (i = 0; i < nr; i++)
-		free_swap_cache(pagep[i]);
-	release_pages(pagep, nr);
+	if (do_lru)
+		lru_add_drain();
+	for (i = 0; i < nr; i++) {
+		if (!do_lru)
+			VM_WARN_ON_ONCE_PAGE(PageLRU(pages[i]), pages[i]);
+		free_swap_cache(pages[i]);
+	}
+	release_pages(pages, nr);
+}
+
+void free_pages_and_swap_cache(struct page **pages, int nr)
+{
+	__free_pages_and_swap_cache(pages, nr, true);
+}
+
+void free_pages_and_swap_cache_nolru(struct page **pages, int nr)
+{
+	__free_pages_and_swap_cache(pages, nr, false);
 }
 
 static inline bool swap_use_vma_readahead(void)
--
2.30.2