Message-Id: <1617140178-8773-41-git-send-email-anthony.yznaga@oracle.com>
Date: Tue, 30 Mar 2021 14:36:15 -0700
From: Anthony Yznaga <anthony.yznaga@...cle.com>
To: linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc: willy@...radead.org, corbet@....net, tglx@...utronix.de,
mingo@...hat.com, bp@...en8.de, x86@...nel.org, hpa@...or.com,
dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
rppt@...nel.org, akpm@...ux-foundation.org, hughd@...gle.com,
ebiederm@...ssion.com, keescook@...omium.org, ardb@...nel.org,
nivedita@...m.mit.edu, jroedel@...e.de, masahiroy@...nel.org,
nathan@...nel.org, terrelln@...com, vincenzo.frascino@....com,
martin.b.radev@...il.com, andreyknvl@...gle.com,
daniel.kiper@...cle.com, rafael.j.wysocki@...el.com,
dan.j.williams@...el.com, Jonathan.Cameron@...wei.com,
bhe@...hat.com, rminnich@...il.com, ashish.kalra@....com,
guro@...com, hannes@...xchg.org, mhocko@...nel.org,
iamjoonsoo.kim@....com, vbabka@...e.cz, alex.shi@...ux.alibaba.com,
david@...hat.com, richard.weiyang@...il.com,
vdavydov.dev@...il.com, graf@...zon.com, jason.zeng@...el.com,
lei.l.li@...el.com, daniel.m.jordan@...cle.com,
steven.sistare@...cle.com, linux-fsdevel@...r.kernel.org,
linux-doc@...r.kernel.org, kexec@...ts.infradead.org
Subject: [RFC v2 40/43] shmem: initial support for adding multiple pages to pagecache

shmem_insert_pages() currently loops over the array of pages passed
to it and calls shmem_add_to_page_cache() for each one. Prepare
for adding pages to the pagecache in bulk by adding and using a
shmem_add_pages_to_cache() call. For now it still iterates over
the array and adds pages individually, but performance when multiple
threads are adding to the same pagecache is improved by calling a new
shmem_add_to_page_cache_fast() function that does not check for
conflicts and that drops the xarray lock before updating stats.

Signed-off-by: Anthony Yznaga <anthony.yznaga@...cle.com>
---
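[ Illustrative note, not part of the patch: the unwind-on-failure idiom
  used by shmem_add_pages_to_cache(), and the idea of deferring the
  statistics update until after the lock protecting the store has been
  dropped (as shmem_add_to_page_cache_fast() does with the xarray lock),
  can be modelled in ordinary userspace C.  The sketch below is a model
  only; the "store" type and all of its helpers are invented names for
  this example and are not kernel APIs. ]

/* Userspace model of bulk insert with rollback; not kernel code. */
/* Build with: gcc -pthread -o store-model store-model.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define NSLOTS 16

struct store {
	pthread_mutex_t lock;		/* models the xarray lock */
	void *slots[NSLOTS];
	atomic_long nr_items;		/* stats bumped outside the lock */
};

/* "Fast" insert: no conflict check, stats updated after unlocking. */
static int store_insert_fast(struct store *s, unsigned long index, void *item)
{
	if (index >= NSLOTS)
		return -1;		/* models an xas_error() failure */
	pthread_mutex_lock(&s->lock);
	s->slots[index] = item;
	pthread_mutex_unlock(&s->lock);
	atomic_fetch_add(&s->nr_items, 1);
	return 0;
}

static void store_delete(struct store *s, unsigned long index)
{
	pthread_mutex_lock(&s->lock);
	s->slots[index] = NULL;
	pthread_mutex_unlock(&s->lock);
	atomic_fetch_sub(&s->nr_items, 1);
}

/* Bulk add with unwind on failure, mirroring shmem_add_pages_to_cache(). */
static int store_insert_many(struct store *s, void *items[], int nitems,
			     unsigned long start)
{
	unsigned long index = start;
	int i, err;

	for (i = 0; i < nitems; i++) {
		err = store_insert_fast(s, index, items[i]);
		if (err)
			goto out_release;
		index++;
	}
	return 0;

out_release:
	/* Undo only the entries that were actually inserted. */
	while (--i >= 0)
		store_delete(s, start + i);
	return err;
}

int main(void)
{
	struct store s = { .lock = PTHREAD_MUTEX_INITIALIZER };
	void *items[] = { "a", "b", "c" };
	/* Starting at slot 14 makes the third insert fail and roll back. */
	int err = store_insert_many(&s, items, 3, 14);

	printf("err=%d nr_items=%ld\n", err, (long)atomic_load(&s.nr_items));
	return err ? EXIT_FAILURE : EXIT_SUCCESS;
}

The property mirrored here is that the error path only walks back over
entries that were actually inserted (the while (--i >= 0) idiom), which
matches the cleanup that shmem_insert_pages() used before this change.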
mm/shmem.c | 123 +++++++++++++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 108 insertions(+), 15 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 63299da75166..f495af51042e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -738,6 +738,74 @@ static int shmem_add_to_page_cache(struct page *page,
 	return error;
 }
 
+static int shmem_add_to_page_cache_fast(struct page *page,
+				struct address_space *mapping,
+				pgoff_t index, gfp_t gfp,
+				struct mm_struct *charge_mm, bool skipcharge)
+{
+	XA_STATE_ORDER(xas, &mapping->i_pages, index, thp_order(page));
+	unsigned long nr = thp_nr_pages(page);
+	unsigned long i = 0;
+	int error;
+
+	VM_BUG_ON_PAGE(PageTail(page), page);
+	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
+
+	page_ref_add(page, nr);
+	page->mapping = mapping;
+	page->index = index;
+
+	if (!skipcharge && !PageSwapCache(page)) {
+		error = mem_cgroup_charge(page, charge_mm, gfp);
+		if (error) {
+			if (PageTransHuge(page)) {
+				count_vm_event(THP_FILE_FALLBACK);
+				count_vm_event(THP_FILE_FALLBACK_CHARGE);
+			}
+			goto error;
+		}
+	}
+	cgroup_throttle_swaprate(page, gfp);
+
+	do {
+		xas_lock_irq(&xas);
+		xas_create_range(&xas);
+		if (xas_error(&xas))
+			goto unlock;
+next:
+		xas_store(&xas, page);
+		if (++i < nr) {
+			xas_next(&xas);
+			goto next;
+		}
+		mapping->nrpages += nr;
+		xas_unlock(&xas);
+		if (PageTransHuge(page)) {
+			count_vm_event(THP_FILE_ALLOC);
+			__inc_node_page_state(page, NR_SHMEM_THPS);
+		}
+		__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
+		__mod_lruvec_page_state(page, NR_SHMEM, nr);
+		local_irq_enable();
+		break;
+unlock:
+		xas_unlock_irq(&xas);
+	} while (xas_nomem(&xas, gfp));
+
+	if (xas_error(&xas)) {
+		error = xas_error(&xas);
+		goto error;
+	}
+
+	return 0;
+error:
+	page->mapping = NULL;
+	page_ref_sub(page, nr);
+	return error;
+}
+
 /*
  * Like delete_from_page_cache, but substitutes swap for page.
  */
@@ -759,6 +827,41 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
 	BUG_ON(error);
 }
 
+static int shmem_add_pages_to_cache(struct page *pages[], int npages,
+				struct address_space *mapping,
+				pgoff_t start, gfp_t gfp,
+				struct mm_struct *charge_mm)
+{
+	pgoff_t index = start;
+	int i, err;
+
+	i = 0;
+	while (i < npages) {
+		if (PageTransHuge(pages[i])) {
+			err = shmem_add_to_page_cache_fast(pages[i], mapping, index, gfp, charge_mm, page_memcg(pages[i]) ? true : false);
+			if (err)
+				goto out_release;
+			index += thp_nr_pages(pages[i]);
+			i++;
+			continue;
+		}
+
+		err = shmem_add_to_page_cache_fast(pages[i], mapping, index, gfp, charge_mm, page_memcg(pages[i]) ? true : false);
+		if (err)
+			goto out_release;
+		index++;
+		i++;
+	}
+
+	return 0;
+
+out_release:
+	while (--i >= 0)
+		delete_from_page_cache(pages[i]);
+
+	return err;
+}
+
 int shmem_insert_page(struct mm_struct *mm, struct inode *inode, pgoff_t index,
 		      struct page *page)
 {
@@ -889,17 +992,10 @@ int shmem_insert_pages(struct mm_struct *charge_mm, struct inode *inode,
 		__SetPageReferenced(pages[i]);
 	}
 
-	for (i = 0; i < npages; i++) {
-		bool ischarged = page_memcg(pages[i]) ? true : false;
-
-		err = shmem_add_to_page_cache(pages[i], mapping, index,
-					NULL, gfp & GFP_RECLAIM_MASK,
-					charge_mm, ischarged);
-		if (err)
-			goto out_release;
-
-		index += thp_nr_pages(pages[i]);
-	}
+	err = shmem_add_pages_to_cache(pages, npages, mapping, index,
+				gfp & GFP_RECLAIM_MASK, charge_mm);
+	if (err)
+		goto out_unlock;
 
 	spin_lock(&info->lock);
 	info->alloced += nr;
@@ -922,10 +1018,7 @@ int shmem_insert_pages(struct mm_struct *charge_mm, struct inode *inode,
 	return 0;
 
-out_release:
-	while (--i >= 0)
-		delete_from_page_cache(pages[i]);
-
+out_unlock:
 	for (i = 0; i < npages; i++)
 		unlock_page(pages[i]);
--
1.8.3.1