[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20200922020148.3261797-2-riel@surriel.com>
Date: Mon, 21 Sep 2020 22:01:47 -0400
From: Rik van Riel <riel@...riel.com>
To: linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org, kernel-team@...com, niketa@...com,
akpm@...ux-foundation.org, sjenning@...hat.com, ddstreet@...e.org,
konrad.wilk@...cle.com, hannes@...xchg.org,
Rik van Riel <riel@...riel.com>
Subject: [PATCH 1/2] mm,swap: extract swap single page readahead into its own function
Split the swap single-page readahead code into its own function, to make
the next patch easier to read. No functional changes.
Signed-off-by: Rik van Riel <riel@...riel.com>
---
mm/swap_state.c | 40 +++++++++++++++++++++++++---------------
1 file changed, 25 insertions(+), 15 deletions(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index c16eebb81d8b..aacb9ba53f63 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -594,6 +594,28 @@ static unsigned long swapin_nr_pages(unsigned long offset)
return pages;
}
+static struct page *swap_cluster_read_one(swp_entry_t entry,
+ unsigned long offset, gfp_t gfp_mask,
+ struct vm_area_struct *vma, unsigned long addr, bool readahead)
+{
+ bool page_allocated;
+ struct page *page;
+
+ page = __read_swap_cache_async(swp_entry(swp_type(entry), offset),
+ gfp_mask, vma, addr, &page_allocated);
+ if (!page)
+ return NULL;
+ if (page_allocated) {
+ swap_readpage(page, false);
+ if (readahead) {
+ SetPageReadahead(page);
+ count_vm_event(SWAP_RA);
+ }
+ }
+ put_page(page);
+ return page;
+}
+
/**
* swap_cluster_readahead - swap in pages in hope we need them soon
* @entry: swap entry of this memory
@@ -615,14 +637,13 @@ static unsigned long swapin_nr_pages(unsigned long offset)
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct vm_fault *vmf)
{
- struct page *page;
unsigned long entry_offset = swp_offset(entry);
unsigned long offset = entry_offset;
unsigned long start_offset, end_offset;
unsigned long mask;
struct swap_info_struct *si = swp_swap_info(entry);
struct blk_plug plug;
- bool do_poll = true, page_allocated;
+ bool do_poll = true;
struct vm_area_struct *vma = vmf->vma;
unsigned long addr = vmf->address;
@@ -649,19 +670,8 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
blk_start_plug(&plug);
for (offset = start_offset; offset <= end_offset ; offset++) {
/* Ok, do the async read-ahead now */
- page = __read_swap_cache_async(
- swp_entry(swp_type(entry), offset),
- gfp_mask, vma, addr, &page_allocated);
- if (!page)
- continue;
- if (page_allocated) {
- swap_readpage(page, false);
- if (offset != entry_offset) {
- SetPageReadahead(page);
- count_vm_event(SWAP_RA);
- }
- }
- put_page(page);
+ swap_cluster_read_one(entry, offset, gfp_mask, vma, addr,
+ offset != entry_offset);
}
blk_finish_plug(&plug);
--
2.25.4
Powered by blists - more mailing lists