Message-Id: <20180219194556.6575-28-willy@infradead.org>
Date: Mon, 19 Feb 2018 11:45:22 -0800
From: Matthew Wilcox <willy@...radead.org>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Matthew Wilcox <mawilcox@...rosoft.com>,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
linux-fsdevel@...r.kernel.org
Subject: [PATCH v7 27/61] page cache: Add and replace pages using the XArray

From: Matthew Wilcox <mawilcox@...rosoft.com>

Use the XArray APIs to add and replace pages in the page cache.  This
removes two uses of the radix tree preload API and results in
significantly shorter code.

Signed-off-by: Matthew Wilcox <mawilcox@...rosoft.com>
---
 include/linux/swap.h |   8 ++-
 mm/filemap.c         | 143 ++++++++++++++++++++++-----------------------------
 2 files changed, 67 insertions(+), 84 deletions(-)

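The heart of the conversion is the XArray allocation idiom: instead of
calling radix_tree_preload() before taking the lock, the operation is
attempted under the lock and retried after xas_nomem() has allocated
memory.  A minimal sketch of the pattern (illustrative only: 'mapping',
'index' and 'page' stand in for the real arguments, GFP_KERNEL for the
caller's gfp_mask, and the real code below additionally handles shadow
entries):

	XA_STATE(xas, &mapping->pages, index);
	void *old;

	do {
		xas_lock_irq(&xas);
		old = xas_create(&xas);		/* may record -ENOMEM in xas */
		if (old)
			xas_set_err(&xas, -EEXIST);	/* slot already occupied */
		else if (!xas_error(&xas))
			xas_store(&xas, page);	/* publish the new entry */
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));	/* alloc outside lock, retry */

	return xas_error(&xas);
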
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7b6a59f722a3..c306e14b5ab1 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -299,8 +299,12 @@ void *workingset_eviction(struct address_space *mapping, struct page *page);
 bool workingset_refault(void *shadow);
 void workingset_activation(struct page *page);
 
-/* Do not use directly, use workingset_lookup_update */
-void workingset_update_node(struct radix_tree_node *node);
+/* Only track the nodes of mappings with shadow entries */
+void workingset_update_node(struct xa_node *node);
+#define mapping_set_update(xas, mapping) do {				\
+	if (!dax_mapping(mapping) && !shmem_mapping(mapping))		\
+		xas_set_update(xas, workingset_update_node);		\
+} while (0)
 
 /* Returns workingset_update_node() if the mapping has shadow entries. */
 #define workingset_lookup_update(mapping)				\
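The intended usage: a page cache operation initialises an xa_state and
calls mapping_set_update() before walking, so that every node it touches
is reported to workingset_update_node() and nodes containing shadow
entries can be tracked.  A sketch (names illustrative):

	XA_STATE(xas, &mapping->pages, offset);

	mapping_set_update(&xas, mapping);	/* no-op for DAX and shmem */
	/* xas_store() etc. now call workingset_update_node() on each
	   node they modify */
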
diff --git a/mm/filemap.c b/mm/filemap.c
index 778a551f6713..fcfdc146591b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -111,35 +111,6 @@
 *   ->tasklist_lock		(memory_failure, collect_procs_ao)
 */
 
-static int page_cache_tree_insert(struct address_space *mapping,
-				  struct page *page, void **shadowp)
-{
-	struct radix_tree_node *node;
-	void **slot;
-	int error;
-
-	error = __radix_tree_create(&mapping->pages, page->index, 0,
-					&node, &slot);
-	if (error)
-		return error;
-	if (*slot) {
-		void *p;
-
-		p = radix_tree_deref_slot_protected(slot,
-						&mapping->pages.xa_lock);
-		if (!xa_is_value(p))
-			return -EEXIST;
-
-		mapping->nrexceptional--;
-		if (shadowp)
-			*shadowp = p;
-	}
-	__radix_tree_replace(&mapping->pages, node, slot, page,
-			workingset_lookup_update(mapping));
-	mapping->nrpages++;
-	return 0;
-}
-
 static void page_cache_tree_delete(struct address_space *mapping,
 				   struct page *page, void *shadow)
 {
@@ -775,51 +746,44 @@ EXPORT_SYMBOL(file_write_and_wait_range);
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
- * The remove + add is atomic.  The only way this function can fail is
- * memory allocation failure.
+ * The remove + add is atomic.  This function cannot fail.
 */
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 {
-	int error;
+	struct address_space *mapping = old->mapping;
+	void (*freepage)(struct page *) = mapping->a_ops->freepage;
+	pgoff_t offset = old->index;
+	XA_STATE(xas, &mapping->pages, offset);
+	unsigned long flags;
 
 	VM_BUG_ON_PAGE(!PageLocked(old), old);
 	VM_BUG_ON_PAGE(!PageLocked(new), new);
 	VM_BUG_ON_PAGE(new->mapping, new);
 
-	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
-	if (!error) {
-		struct address_space *mapping = old->mapping;
-		void (*freepage)(struct page *);
-		unsigned long flags;
-
-		pgoff_t offset = old->index;
-		freepage = mapping->a_ops->freepage;
-
-		get_page(new);
-		new->mapping = mapping;
-		new->index = offset;
+	get_page(new);
+	new->mapping = mapping;
+	new->index = offset;
 
-		xa_lock_irqsave(&mapping->pages, flags);
-		__delete_from_page_cache(old, NULL);
-		error = page_cache_tree_insert(mapping, new, NULL);
-		BUG_ON(error);
+	xas_lock_irqsave(&xas, flags);
+	xas_store(&xas, new);
 
-		/*
-		 * hugetlb pages do not participate in page cache accounting.
-		 */
-		if (!PageHuge(new))
-			__inc_node_page_state(new, NR_FILE_PAGES);
-		if (PageSwapBacked(new))
-			__inc_node_page_state(new, NR_SHMEM);
-		xa_unlock_irqrestore(&mapping->pages, flags);
-		mem_cgroup_migrate(old, new);
-		radix_tree_preload_end();
-		if (freepage)
-			freepage(old);
-		put_page(old);
-	}
+	old->mapping = NULL;
+	/* hugetlb pages do not participate in page cache accounting. */
+	if (!PageHuge(old))
+		__dec_node_page_state(new, NR_FILE_PAGES);
+	if (!PageHuge(new))
+		__inc_node_page_state(new, NR_FILE_PAGES);
+	if (PageSwapBacked(old))
+		__dec_node_page_state(new, NR_SHMEM);
+	if (PageSwapBacked(new))
+		__inc_node_page_state(new, NR_SHMEM);
+	xas_unlock_irqrestore(&xas, flags);
+	mem_cgroup_migrate(old, new);
+	if (freepage)
+		freepage(old);
+	put_page(old);
 
-	return error;
+	return 0;
 }
 
 EXPORT_SYMBOL_GPL(replace_page_cache_page);
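Note why the replace path can no longer fail: 'old' already occupies
the slot, so storing 'new' over it only walks nodes that already exist
and never needs to allocate.  The whole exchange reduces to (sketch):

	xas_lock_irqsave(&xas, flags);
	xas_store(&xas, new);	/* overwrites in place, no allocation */
	xas_unlock_irqrestore(&xas, flags);
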
@@ -828,12 +792,15 @@ static int __add_to_page_cache_locked(struct page *page,
 					pgoff_t offset, gfp_t gfp_mask,
 					void **shadowp)
 {
+	XA_STATE(xas, &mapping->pages, offset);
 	int huge = PageHuge(page);
 	struct mem_cgroup *memcg;
 	int error;
+	void *old;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
+	mapping_set_update(&xas, mapping);
 
 	if (!huge) {
 		error = mem_cgroup_try_charge(page, current->mm,
@@ -842,39 +809,51 @@ static int __add_to_page_cache_locked(struct page *page,
 			return error;
 	}
 
-	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
-	if (error) {
-		if (!huge)
-			mem_cgroup_cancel_charge(page, memcg, false);
-		return error;
-	}
-
 	get_page(page);
 	page->mapping = mapping;
 	page->index = offset;
 
-	xa_lock_irq(&mapping->pages);
-	error = page_cache_tree_insert(mapping, page, shadowp);
-	radix_tree_preload_end();
-	if (unlikely(error))
-		goto err_insert;
+	do {
+		xas_lock_irq(&xas);
+		old = xas_create(&xas);
+		if (xas_error(&xas))
+			goto unlock;
+		if (xa_is_value(old)) {
+			mapping->nrexceptional--;
+			if (shadowp)
+				*shadowp = old;
+		} else if (old) {
+			xas_set_err(&xas, -EEXIST);
+			goto unlock;
+		}
+
+		xas_store(&xas, page);
+		mapping->nrpages++;
+
+		/*
+		 * hugetlb pages do not participate in
+		 * page cache accounting.
+		 */
+		if (!huge)
+			__inc_node_page_state(page, NR_FILE_PAGES);
+unlock:
+		xas_unlock_irq(&xas);
+	} while (xas_nomem(&xas, gfp_mask & ~__GFP_HIGHMEM));
+
+	if (xas_error(&xas))
+		goto error;
 
-	/* hugetlb pages do not participate in page cache accounting. */
-	if (!huge)
-		__inc_node_page_state(page, NR_FILE_PAGES);
-	xa_unlock_irq(&mapping->pages);
 	if (!huge)
 		mem_cgroup_commit_charge(page, memcg, false, false);
 	trace_mm_filemap_add_to_page_cache(page);
 	return 0;
-err_insert:
+error:
 	page->mapping = NULL;
 	/* Leave page->index set: truncation relies upon it */
-	xa_unlock_irq(&mapping->pages);
 	if (!huge)
 		mem_cgroup_cancel_charge(page, memcg, false);
 	put_page(page);
-	return error;
+	return xas_error(&xas);
 }
 
 /**
--
2.16.1