Message-Id: <20240531081230.310128-4-ying.huang@intel.com>
Date: Fri, 31 May 2024 16:12:30 +0800
From: Huang Ying <ying.huang@...el.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
Huang Ying <ying.huang@...el.com>,
Hugh Dickins <hughd@...gle.com>,
Alistair Popple <apopple@...dia.com>,
Anshuman Khandual <anshuman.khandual@....com>,
David Hildenbrand <david@...hat.com>,
Mel Gorman <mgorman@...hsingularity.net>,
Miaohe Lin <linmiaohe@...wei.com>,
Minchan Kim <minchan@...nel.org>,
Ryan Roberts <ryan.roberts@....com>,
Yang Shi <shy828301@...il.com>,
Yu Zhao <yuzhao@...gle.com>,
Kairui Song <kasong@...cent.com>,
Barry Song <v-songbaohua@...o.com>,
Chris Li <chrisl@...nel.org>,
Yosry Ahmed <yosryahmed@...gle.com>
Subject: [PATCH 3/3] mm,swap: simplify VMA based swap readahead window calculation

Replace PFNs with addresses in the readahead window calculation. This
simplifies the logic and reduces the line count.

No functional change is expected.
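
For illustration only (this snippet is not part of the patch), the new
address-based window placement boils down to the standalone userspace
sketch below. PAGE_SHIFT and every address used are made-up example
values, and the max3()/min3() clamping to the VMA and PMD boundaries
done by the real kernel code is omitted here:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/*
 * Place a readahead window of "win" pages around the faulting address
 * "faddr", mirroring the address-based logic of swap_vma_ra_win().
 * Returns the window start; the end is start + (win << PAGE_SHIFT).
 */
static unsigned long ra_window_start(unsigned long faddr,
				     unsigned long prev_faddr,
				     unsigned long win)
{
	unsigned long left;

	if (faddr == prev_faddr + PAGE_SIZE)		/* forward sequential */
		left = faddr;
	else if (prev_faddr == faddr + PAGE_SIZE)	/* backward sequential */
		left = faddr - (win << PAGE_SHIFT) + PAGE_SIZE;
	else						/* random: center on fault */
		left = faddr - (((win - 1) / 2) << PAGE_SHIFT);
	if ((long)left < 0)				/* clamp at address 0 */
		left = 0;
	return left;
}

int main(void)
{
	/* Illustrative values: a fault one page above the previous
	 * fault address, with a 4-page window. */
	unsigned long start = ra_window_start(0x7f0000005000UL,
					      0x7f0000004000UL, 4);

	printf("window: [%#lx, %#lx)\n", start, start + (4UL << PAGE_SHIFT));
	return 0;
}

In this forward-sequential case the program prints
"window: [0x7f0000005000, 0x7f0000009000)", i.e. the window begins at the
faulting address, matching the "fpfn == pfn + 1" branch of the old
PFN-based code.
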
Signed-off-by: "Huang, Ying" <ying.huang@...el.com>
Cc: Hugh Dickins <hughd@...gle.com>
Cc: Alistair Popple <apopple@...dia.com>
Cc: Anshuman Khandual <anshuman.khandual@....com>
Cc: David Hildenbrand <david@...hat.com>
Cc: Mel Gorman <mgorman@...hsingularity.net>
Cc: Miaohe Lin <linmiaohe@...wei.com>
Cc: Minchan Kim <minchan@...nel.org>
Cc: Ryan Roberts <ryan.roberts@....com>
Cc: Yang Shi <shy828301@...il.com>
Cc: Yu Zhao <yuzhao@...gle.com>
Cc: Kairui Song <kasong@...cent.com>
Cc: Barry Song <v-songbaohua@...o.com>
Cc: Chris Li <chrisl@...nel.org>
Cc: Yosry Ahmed <yosryahmed@...gle.com>
---
mm/swap_state.c | 66 +++++++++++++++++++------------------------------
1 file changed, 25 insertions(+), 41 deletions(-)

diff --git a/mm/swap_state.c b/mm/swap_state.c
index e1dac70198a6..d2adbd7b571b 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -740,54 +740,40 @@ void exit_swap_address_space(unsigned int type)
swapper_spaces[type] = NULL;
}
-static unsigned short swap_vma_ra_win(struct vm_fault *vmf,
- unsigned short *offset,
- unsigned short *nr_pte)
+static int swap_vma_ra_win(struct vm_fault *vmf, unsigned long *start,
+ unsigned long *end)
{
struct vm_area_struct *vma = vmf->vma;
unsigned long ra_val;
- unsigned long faddr, pfn, fpfn, lpfn, rpfn;
- unsigned long start, end;
+ unsigned long faddr, prev_faddr, left, right;
unsigned int max_win, hits, prev_win, win;
- max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
- SWAP_RA_ORDER_CEILING);
+ max_win = 1 << min(READ_ONCE(page_cluster), SWAP_RA_ORDER_CEILING);
if (max_win == 1)
return 1;
faddr = vmf->address;
- fpfn = PFN_DOWN(faddr);
ra_val = GET_SWAP_RA_VAL(vma);
- pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
+ prev_faddr = SWAP_RA_ADDR(ra_val);
prev_win = SWAP_RA_WIN(ra_val);
hits = SWAP_RA_HITS(ra_val);
- win = __swapin_nr_pages(pfn, fpfn, hits, max_win, prev_win);
- atomic_long_set(&vma->swap_readahead_info,
- SWAP_RA_VAL(faddr, win, 0));
+ win = __swapin_nr_pages(PFN_DOWN(prev_faddr), PFN_DOWN(faddr), hits,
+ max_win, prev_win);
+ atomic_long_set(&vma->swap_readahead_info, SWAP_RA_VAL(faddr, win, 0));
if (win == 1)
return 1;
- if (fpfn == pfn + 1) {
- lpfn = fpfn;
- rpfn = fpfn + win;
- } else if (pfn == fpfn + 1) {
- lpfn = fpfn - win + 1;
- rpfn = fpfn + 1;
- } else {
- unsigned int left = (win - 1) / 2;
-
- lpfn = fpfn - left;
- rpfn = fpfn + win - left;
- }
- if ((long)lpfn < 0)
- lpfn = 0;
- start = max3(lpfn, PFN_DOWN(vma->vm_start),
- PFN_DOWN(faddr & PMD_MASK));
- end = min3(rpfn, PFN_DOWN(vma->vm_end),
- PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
-
- *nr_pte = end - start;
- *offset = fpfn - start;
+ if (faddr == prev_faddr + PAGE_SIZE)
+ left = faddr;
+ else if (prev_faddr == faddr + PAGE_SIZE)
+ left = faddr - (win << PAGE_SHIFT) + PAGE_SIZE;
+ else
+ left = faddr - (((win - 1) / 2) << PAGE_SHIFT);
+ right = left + (win << PAGE_SHIFT);
+ if ((long)left < 0)
+ left = 0;
+ *start = max3(left, vma->vm_start, faddr & PMD_MASK);
+ *end = min3(right, vma->vm_end, (faddr & PMD_MASK) + PMD_SIZE);
return win;
}
@@ -815,22 +801,20 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
struct swap_iocb *splug = NULL;
struct folio *folio;
pte_t *pte = NULL, pentry;
- unsigned long addr;
+ int win;
+ unsigned long start, end, addr;
swp_entry_t entry;
pgoff_t ilx;
- unsigned int i;
bool page_allocated;
- unsigned short win, nr_pte, offset;
- win = swap_vma_ra_win(vmf, &offset, &nr_pte);
+ win = swap_vma_ra_win(vmf, &start, &end);
if (win == 1)
goto skip;
- addr = vmf->address - offset * PAGE_SIZE;
- ilx = targ_ilx - offset;
+ ilx = targ_ilx - PFN_DOWN(vmf->address - start);
blk_start_plug(&plug);
- for (i = 0; i < nr_pte; i++, ilx++, addr += PAGE_SIZE) {
+ for (addr = start; addr < end; ilx++, addr += PAGE_SIZE) {
if (!pte++) {
pte = pte_offset_map(vmf->pmd, addr);
if (!pte)
@@ -850,7 +834,7 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
continue;
if (page_allocated) {
swap_read_folio(folio, false, &splug);
- if (i != offset) {
+ if (addr != vmf->address) {
folio_set_readahead(folio);
count_vm_event(SWAP_RA);
}
--
2.39.2