Message-Id: <20180306192413.5499-49-willy@infradead.org>
Date: Tue, 6 Mar 2018 11:23:58 -0800
From: Matthew Wilcox <willy@...radead.org>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Matthew Wilcox <mawilcox@...rosoft.com>,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
linux-fsdevel@...r.kernel.org,
Ryusuke Konishi <konishi.ryusuke@....ntt.co.jp>,
linux-nilfs@...r.kernel.org, linux-btrfs@...r.kernel.org
Subject: [PATCH v8 48/63] shmem: Convert shmem_add_to_page_cache to XArray

From: Matthew Wilcox <mawilcox@...rosoft.com>

This removes the last caller of radix_tree_maybe_preload_order().  The code
is simpler, except when we run out of memory for new xa_nodes partway through
inserting entries into the XArray; in that case the partial insertion has to
be undone.  Hopefully we can support multi-index entries in the page cache
soon, and then all of this awful code goes away.
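
For readers new to the XArray API, here is a minimal sketch of the
lock/store/xas_nomem() retry pattern that the new loop in
shmem_add_to_page_cache() follows.  It is not part of this patch:
store_one() and its parameters are made-up names, while XA_STATE(),
xas_lock_irq(), xas_store(), xas_unlock_irq(), xas_nomem() and
xas_error() are the real XArray calls used below.

#include <linux/xarray.h>

/* Illustrative only: store one entry, allocating nodes as needed. */
static int store_one(struct xarray *xa, unsigned long index,
		     void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock_irq(&xas);
		/* May record -ENOMEM in the xa_state if no node is free. */
		xas_store(&xas, entry);
		xas_unlock_irq(&xas);
		/*
		 * On -ENOMEM, xas_nomem() allocates a node outside the lock
		 * and returns true so the loop retries; otherwise it returns
		 * false and we fall through with whatever error (if any) is
		 * recorded in the xa_state.
		 */
	} while (xas_nomem(&xas, gfp));

	return xas_error(&xas);
}

The patch below extends this pattern with xas_create_range(), which
allocates all the needed nodes up front, and an undo loop that walks
back with xas_prev() so a partially inserted huge page can be backed
out if one of the slots does not contain the expected entry.
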
Signed-off-by: Matthew Wilcox <mawilcox@...rosoft.com>
---
mm/shmem.c | 87 ++++++++++++++++++++++++++++----------------------------------
1 file changed, 39 insertions(+), 48 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 49f42dc9e1dc..fb06fb3e644a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -558,9 +558,10 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
*/
static int shmem_add_to_page_cache(struct page *page,
struct address_space *mapping,
- pgoff_t index, void *expected)
+ pgoff_t index, void *expected, gfp_t gfp)
{
- int error, nr = hpage_nr_pages(page);
+ XA_STATE(xas, &mapping->i_pages, index);
+ unsigned long i, nr = 1UL << compound_order(page);
VM_BUG_ON_PAGE(PageTail(page), page);
VM_BUG_ON_PAGE(index != round_down(index, nr), page);
@@ -569,49 +570,47 @@ static int shmem_add_to_page_cache(struct page *page,
VM_BUG_ON(expected && PageTransHuge(page));
page_ref_add(page, nr);
- page->mapping = mapping;
page->index = index;
+ page->mapping = mapping;
- xa_lock_irq(&mapping->i_pages);
- if (PageTransHuge(page)) {
- void __rcu **results;
- pgoff_t idx;
- int i;
-
- error = 0;
- if (radix_tree_gang_lookup_slot(&mapping->i_pages,
- &results, &idx, index, 1) &&
- idx < index + HPAGE_PMD_NR) {
- error = -EEXIST;
+ do {
+ xas_lock_irq(&xas);
+ xas_create_range(&xas, index + nr - 1);
+ if (xas_error(&xas))
+ goto unlock;
+ for (i = 0; i < nr; i++) {
+ void *entry = xas_load(&xas);
+ if (entry != expected)
+ xas_set_err(&xas, -ENOENT);
+ if (xas_error(&xas))
+ goto undo;
+ xas_store(&xas, page + i);
+ xas_next(&xas);
}
-
- if (!error) {
- for (i = 0; i < HPAGE_PMD_NR; i++) {
- error = radix_tree_insert(&mapping->i_pages,
- index + i, page + i);
- VM_BUG_ON(error);
- }
+ if (PageTransHuge(page)) {
count_vm_event(THP_FILE_ALLOC);
+ __inc_node_page_state(page, NR_SHMEM_THPS);
}
- } else if (!expected) {
- error = radix_tree_insert(&mapping->i_pages, index, page);
- } else {
- error = shmem_xa_replace(mapping, index, expected, page);
- }
-
- if (!error) {
mapping->nrpages += nr;
- if (PageTransHuge(page))
- __inc_node_page_state(page, NR_SHMEM_THPS);
__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
- xa_unlock_irq(&mapping->i_pages);
- } else {
+ goto unlock;
+undo:
+ while (i-- > 0) {
+ xas_store(&xas, NULL);
+ xas_prev(&xas);
+ }
+unlock:
+ xas_unlock_irq(&xas);
+ } while (xas_nomem(&xas, gfp));
+
+ if (xas_error(&xas)) {
page->mapping = NULL;
- xa_unlock_irq(&mapping->i_pages);
page_ref_sub(page, nr);
+ return xas_error(&xas);
}
- return error;
+
+ return 0;
}
/*
@@ -1159,7 +1158,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
*/
if (!error)
error = shmem_add_to_page_cache(*pagep, mapping, index,
- radswap);
+ radswap, gfp);
if (error != -ENOMEM) {
/*
* Truncation and eviction use free_swap_and_cache(), which
@@ -1680,7 +1679,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
false);
if (!error) {
error = shmem_add_to_page_cache(page, mapping, index,
- swp_to_radix_entry(swap));
+ swp_to_radix_entry(swap), gfp);
/*
* We already confirmed swap under page lock, and make
* no memory allocation here, so usually no possibility
@@ -1786,13 +1785,8 @@ alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, inode,
PageTransHuge(page));
if (error)
goto unacct;
- error = radix_tree_maybe_preload_order(gfp & GFP_RECLAIM_MASK,
- compound_order(page));
- if (!error) {
- error = shmem_add_to_page_cache(page, mapping, hindex,
- NULL);
- radix_tree_preload_end();
- }
+ error = shmem_add_to_page_cache(page, mapping, hindex,
+ NULL, gfp & GFP_RECLAIM_MASK);
if (error) {
mem_cgroup_cancel_charge(page, memcg,
PageTransHuge(page));
@@ -2259,11 +2253,8 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
if (ret)
goto out_release;
- ret = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
- if (!ret) {
- ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL);
- radix_tree_preload_end();
- }
+ ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
+ gfp & GFP_RECLAIM_MASK);
if (ret)
goto out_release_uncharge;
--
2.16.1