Message-Id: <20230530080731.1462122-2-fengwei.yin@intel.com>
Date: Tue, 30 May 2023 16:07:28 +0800
From: Yin Fengwei <fengwei.yin@...el.com>
To: willy@...radead.org, ryan.roberts@....com,
linux-arch@...r.kernel.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Cc: fengwei.yin@...el.com
Subject: [PATCH 1/4] filemap: avoid interfering with xas.xa_index
Ryan noticed a 1% performance regression for kernel builds when the
ranged file mapping is used on ext4. The cause was later identified as
an incorrect xas.xa_index update in filemap_map_pages() when the folio
is not a large folio.

Matthew suggested using the XArray API instead of touching xas.xa_index
directly, at [1].
[1] https://lore.kernel.org/linux-mm/ZBho6Q6Xq%2FYqRmBT@casper.infradead.org/
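In short, the old loop bumped xas.xa_index by hand after mapping each
range; as noted above, that update is wrong when the folio is not a
large folio, and it works against the XArray iterator that is already
advanced by xas_next_entry(). A simplified before/after sketch of the
loop shape (illustration only, not a standalone compilable unit;
locking, PTE setup and readahead accounting are omitted, see the diff
below for the real change):

	/* Before: the cursor is advanced by hand, which can get
	 * xas.xa_index out of sync with the entry the iterator is
	 * standing on when the folio is not a large folio. */
	folio = first_map_page(mapping, &xas, end_pgoff);
	do {
		...
		ret |= filemap_map_folio_range(vmf, folio,
				xas.xa_index - folio->index, addr, nr_pages);
		xas.xa_index += nr_pages;	/* manual update */
		...
	} while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);

	/* After: a single helper built on xas_next_entry() lets the
	 * XArray state advance itself, so xa_index is never touched
	 * directly. */
	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
	do {
		...
		ret |= filemap_map_folio_range(vmf, folio,
				xas.xa_index - folio->index, addr, nr_pages);
		...
		folio = next_uptodate_folio(&xas, mapping, end_pgoff);
	} while (folio);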
Signed-off-by: Yin Fengwei <fengwei.yin@...el.com>
Suggested-by: Matthew Wilcox <willy@...radead.org>
---
mm/filemap.c | 30 ++++++------------------------
1 file changed, 6 insertions(+), 24 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 40be33b5ee46..fdb3e0a339b3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3416,10 +3416,10 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
return false;
}
-static struct folio *next_uptodate_page(struct folio *folio,
- struct address_space *mapping,
- struct xa_state *xas, pgoff_t end_pgoff)
+static struct folio *next_uptodate_folio(struct xa_state *xas,
+ struct address_space *mapping, pgoff_t end_pgoff)
{
+ struct folio *folio = xas_next_entry(xas, end_pgoff);
unsigned long max_idx;
do {
@@ -3457,22 +3457,6 @@ static struct folio *next_uptodate_page(struct folio *folio,
return NULL;
}
-static inline struct folio *first_map_page(struct address_space *mapping,
- struct xa_state *xas,
- pgoff_t end_pgoff)
-{
- return next_uptodate_page(xas_find(xas, end_pgoff),
- mapping, xas, end_pgoff);
-}
-
-static inline struct folio *next_map_page(struct address_space *mapping,
- struct xa_state *xas,
- pgoff_t end_pgoff)
-{
- return next_uptodate_page(xas_next_entry(xas, end_pgoff),
- mapping, xas, end_pgoff);
-}
-
/*
* Map page range [start_page, start_page + nr_pages) of folio.
* start_page is gotten from start by folio_page(folio, start)
@@ -3543,12 +3527,11 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
unsigned long addr;
XA_STATE(xas, &mapping->i_pages, start_pgoff);
struct folio *folio;
- unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
vm_fault_t ret = 0;
int nr_pages = 0;
rcu_read_lock();
- folio = first_map_page(mapping, &xas, end_pgoff);
+ folio = next_uptodate_folio(&xas, mapping, end_pgoff);
if (!folio)
goto out;
@@ -3570,15 +3553,14 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
ret |= filemap_map_folio_range(vmf, folio,
xas.xa_index - folio->index, addr, nr_pages);
- xas.xa_index += nr_pages;
folio_unlock(folio);
folio_put(folio);
- } while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
+ folio = next_uptodate_folio(&xas, mapping, end_pgoff);
+ } while (folio);
pte_unmap_unlock(vmf->pte, vmf->ptl);
out:
rcu_read_unlock();
- WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
return ret;
}
EXPORT_SYMBOL(filemap_map_pages);
--
2.30.2