Message-Id: <20180611140639.17215-29-willy@infradead.org>
Date: Mon, 11 Jun 2018 07:05:55 -0700
From: Matthew Wilcox <willy@...radead.org>
To: linux-mm@...ck.org, linux-fsdevel@...r.kernel.org,
	linux-kernel@...r.kernel.org
Cc: Matthew Wilcox <mawilcox@...rosoft.com>, Jan Kara <jack@...e.cz>,
	Jeff Layton <jlayton@...hat.com>,
	Lukas Czerner <lczerner@...hat.com>,
	Ross Zwisler <ross.zwisler@...ux.intel.com>,
	Christoph Hellwig <hch@....de>,
	Goldwyn Rodrigues <rgoldwyn@...e.com>,
	Nicholas Piggin <npiggin@...il.com>,
	Ryusuke Konishi <konishi.ryusuke@....ntt.co.jp>,
	linux-nilfs@...r.kernel.org, Jaegeuk Kim <jaegeuk@...nel.org>,
	Chao Yu <yuchao0@...wei.com>,
	linux-f2fs-devel@...ts.sourceforge.net
Subject: [PATCH v13 28/72] page cache: Convert find_get_entries_tag to XArray
From: Matthew Wilcox <mawilcox@...rosoft.com>
Slightly shorter and simpler code: xas_for_each_tagged() replaces the
radix_tree_for_each_tagged() loop, and the open-coded retry handling
becomes xas_retry(), xas_reload() and xas_reset().
Signed-off-by: Matthew Wilcox <mawilcox@...rosoft.com>
---
 include/linux/pagemap.h |  2 +-
 mm/filemap.c            | 54 ++++++++++++++++++-----------------------
 2 files changed, 25 insertions(+), 31 deletions(-)
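
For anyone who has not worked with the new API yet, the shape of the
lookup loop this patch converts to is roughly as follows.  This is a
minimal sketch rather than code from the patch; the helper name and the
choice of PAGECACHE_TAG_DIRTY are illustrative, but the XA_STATE /
xas_* calls are the ones the new code uses:

	/*
	 * Illustrative sketch, not from this patch: count the pages
	 * currently tagged dirty.  The walk runs under RCU, so each
	 * entry must be revalidated after we take a reference to it.
	 */
	static unsigned count_dirty_pages(struct address_space *mapping,
			pgoff_t start)
	{
		XA_STATE(xas, &mapping->i_pages, start);
		struct page *head, *page;
		unsigned count = 0;

		rcu_read_lock();
		xas_for_each_tagged(&xas, page, ULONG_MAX,
					PAGECACHE_TAG_DIRTY) {
			if (xas_retry(&xas, page))	/* internal entry */
				continue;
			if (xa_is_value(page))	/* shadow/swap/DAX entry */
				continue;
			head = compound_head(page);
			if (!page_cache_get_speculative(head))
				goto reset;	/* page was being freed */
			if (page != xas_reload(&xas)) {	/* slot changed */
				put_page(head);
				goto reset;
			}
			count++;
			put_page(head);
			continue;
	reset:
			xas_reset(&xas);	/* re-walk this index */
		}
		rcu_read_unlock();
		return count;
	}

The xas_retry()/xas_reload()/xas_reset() trio stands in for the
deref_retry/deref_slot dance below, which is where most of the
savings come from.
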
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a6d635fefb01..442977811b59 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -373,7 +373,7 @@ static inline unsigned find_get_pages_tag(struct address_space *mapping,
 					nr_pages, pages);
 }
 unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
-			int tag, unsigned int nr_entries,
+			xa_tag_t tag, unsigned int nr_entries,
 			struct page **entries, pgoff_t *indices);
 
 struct page *grab_cache_page_write_begin(struct address_space *mapping,
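
The prototype change here is type-only.  Callers keep passing the
PAGECACHE_TAG_* constants; earlier in this series those are redefined
in terms of the XArray tag type, along these lines (see the fs.h
conversion in the series for the exact definitions):

	#define PAGECACHE_TAG_DIRTY	XA_TAG_0
	#define PAGECACHE_TAG_WRITEBACK	XA_TAG_1
	#define PAGECACHE_TAG_TOWRITE	XA_TAG_2
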
diff --git a/mm/filemap.c b/mm/filemap.c
index 83328635edaa..67f04bcdf9ef 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1866,57 +1866,51 @@ EXPORT_SYMBOL(find_get_pages_range_tag);
  * @tag.
  */
 unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
-			int tag, unsigned int nr_entries,
+			xa_tag_t tag, unsigned int nr_entries,
 			struct page **entries, pgoff_t *indices)
 {
-	void **slot;
+	XA_STATE(xas, &mapping->i_pages, start);
+	struct page *page;
 	unsigned int ret = 0;
-	struct radix_tree_iter iter;
 
 	if (!nr_entries)
 		return 0;
 
 	rcu_read_lock();
-	radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, start, tag) {
-		struct page *head, *page;
-repeat:
-		page = radix_tree_deref_slot(slot);
-		if (unlikely(!page))
+	xas_for_each_tagged(&xas, page, ULONG_MAX, tag) {
+		struct page *head;
+		if (xas_retry(&xas, page))
 			continue;
-		if (radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page)) {
-				slot = radix_tree_iter_retry(&iter);
-				continue;
-			}
-
-			/*
-			 * A shadow entry of a recently evicted page, a swap
-			 * entry from shmem/tmpfs or a DAX entry.  Return it
-			 * without attempting to raise page count.
-			 */
+		/*
+		 * A shadow entry of a recently evicted page, a swap
+		 * entry from shmem/tmpfs or a DAX entry.  Return it
+		 * without attempting to raise page count.
+		 */
+		if (xa_is_value(page))
 			goto export;
-		}
 
 		head = compound_head(page);
 		if (!page_cache_get_speculative(head))
-			goto repeat;
+			goto retry;
 
 		/* The page was split under us? */
-		if (compound_head(page) != head) {
-			put_page(head);
-			goto repeat;
-		}
+		if (compound_head(page) != head)
+			goto put_page;
 
 		/* Has the page moved? */
-		if (unlikely(page != *slot)) {
-			put_page(head);
-			goto repeat;
-		}
+		if (unlikely(page != xas_reload(&xas)))
+			goto put_page;
+
 export:
-		indices[ret] = iter.index;
+		indices[ret] = xas.xa_index;
 		entries[ret] = page;
 		if (++ret == nr_entries)
 			break;
+		continue;
+put_page:
+		put_page(head);
+retry:
+		xas_reset(&xas);
 	}
 	rcu_read_unlock();
 	return ret;
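
For reference, a condensed caller sketch (illustrative, not lifted from
any file) showing how the parallel entries/indices arrays come back;
note that only real pages carry a reference, value entries do not:

	struct page *entries[PAGEVEC_SIZE];
	pgoff_t indices[PAGEVEC_SIZE];
	unsigned int i, nr;

	nr = find_get_entries_tag(mapping, start, PAGECACHE_TAG_TOWRITE,
				PAGEVEC_SIZE, entries, indices);
	for (i = 0; i < nr; i++) {
		/* indices[i] is valid even for value entries, which
		 * have no struct page and hence no ->index of their own. */
		if (xa_is_value(entries[i]))
			continue;	/* returned without a reference */
		/* ... operate on the page at indices[i] ... */
		put_page(entries[i]);	/* drop the reference we were given */
	}
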
--
2.17.1