Message-ID: <20231119194740.94101-17-ryncsn@gmail.com>
Date: Mon, 20 Nov 2023 03:47:32 +0800
From: Kairui Song <ryncsn@...il.com>
To: linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
"Huang, Ying" <ying.huang@...el.com>,
David Hildenbrand <david@...hat.com>,
Hugh Dickins <hughd@...gle.com>,
Johannes Weiner <hannes@...xchg.org>,
Matthew Wilcox <willy@...radead.org>,
Michal Hocko <mhocko@...e.com>, linux-kernel@...r.kernel.org,
Kairui Song <kasong@...cent.com>
Subject: [PATCH 16/24] mm/swap: reduce scope of get_swap_device in swapin path
From: Kairui Song <kasong@...cent.com>
Move get_swap_device into swapin_readahead to simplify the code
and prepare for follow-up commits.

For the later part of do_swap_page, using swp_swap_info directly is fine,
since in that context the swap device is already pinned by the swapcache
reference.
Signed-off-by: Kairui Song <kasong@...cent.com>
---
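A minimal sketch (not the actual diff; cache lookup and readahead details
elided) of how the device pin is now scoped entirely inside
swapin_readahead(); the real code is in the hunks below:

/* Sketch only: shape of swapin_readahead() after this change. */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			      struct vm_fault *vmf,
			      enum swap_cache_result *result)
{
	struct swap_info_struct *si;
	struct page *page;

	/* Pin the device so swapoff cannot release it under us. */
	si = get_swap_device(entry);
	if (unlikely(!si))
		return ERR_PTR(-EBUSY);	/* callers must check for this */

	/* ... swap cache lookup and readahead run under the pin ... */
	page = NULL;

	put_swap_device(si);
	return page;
}

With this, callers distinguish the -EBUSY case (device going away under
swapoff) from a plain NULL return, as the do_swap_page and unuse_pte_range
hunks below show.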
mm/memory.c | 16 ++++------------
mm/swap_state.c | 8 ++++++--
mm/swapfile.c | 4 +++-
3 files changed, 13 insertions(+), 15 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 22af9f3e8c75..e399b37ef395 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3789,7 +3789,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
struct folio *swapcache = NULL, *folio = NULL;
enum swap_cache_result cache_result;
struct page *page;
- struct swap_info_struct *si = NULL;
rmap_t rmap_flags = RMAP_NONE;
bool exclusive = false;
swp_entry_t entry;
@@ -3845,14 +3844,11 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
goto out;
}
- /* Prevent swapoff from happening to us. */
- si = get_swap_device(entry);
- if (unlikely(!si))
- goto out;
-
page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
vmf, &cache_result);
- if (page) {
+ if (PTR_ERR(page) == -EBUSY) {
+ goto out;
+ } else if (page) {
folio = page_folio(page);
if (cache_result != SWAP_CACHE_HIT) {
/* Had to read the page from swap area: Major fault */
@@ -3964,7 +3960,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
*/
exclusive = true;
} else if (exclusive && folio_test_writeback(folio) &&
- data_race(si->flags & SWP_STABLE_WRITES)) {
+ (swp_swap_info(entry)->flags & SWP_STABLE_WRITES)) {
/*
* This is tricky: not all swap backends support
* concurrent page modifications while under writeback.
@@ -4068,8 +4064,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
out:
- if (si)
- put_swap_device(si);
return ret;
out_nomap:
if (vmf->pte)
@@ -4082,8 +4076,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
folio_unlock(swapcache);
folio_put(swapcache);
}
- if (si)
- put_swap_device(si);
return ret;
}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 51de2a0412df..ff8a166603d0 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -922,6 +922,11 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct page *page;
pgoff_t ilx;
+ /* Prevent swapoff from happening to us */
+ si = get_swap_device(entry);
+ if (unlikely(!si))
+ return ERR_PTR(-EBUSY);
+
folio = swap_cache_get_folio(entry, vmf, &shadow);
if (folio) {
page = folio_file_page(folio, swp_offset(entry));
@@ -929,7 +934,6 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
goto done;
}
- si = swp_swap_info(entry);
mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
if (swap_use_no_readahead(si, swp_offset(entry))) {
page = swapin_no_readahead(entry, gfp_mask, mpol, ilx, vmf->vma->vm_mm);
@@ -944,8 +948,8 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
cache_result = SWAP_CACHE_MISS;
}
mpol_cond_put(mpol);
-
done:
+ put_swap_device(si);
if (result)
*result = cache_result;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index b6d57fff5e21..925ad92486a4 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1857,7 +1857,9 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
pte = NULL;
page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
&vmf, NULL);
- if (page)
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+ else if (page)
folio = page_folio(page);
if (!folio) {
/*
--
2.42.0