Message-Id: <20171215220450.7899-60-willy@infradead.org>
Date: Fri, 15 Dec 2017 14:04:31 -0800
From: Matthew Wilcox <willy@...radead.org>
To: linux-kernel@...r.kernel.org
Cc: Matthew Wilcox <mawilcox@...rosoft.com>,
Ross Zwisler <ross.zwisler@...ux.intel.com>,
David Howells <dhowells@...hat.com>,
Shaohua Li <shli@...nel.org>, Jens Axboe <axboe@...nel.dk>,
Rehas Sachdeva <aquannie@...il.com>,
Marc Zyngier <marc.zyngier@....com>, linux-mm@...ck.org,
linux-fsdevel@...r.kernel.org,
linux-f2fs-devel@...ts.sourceforge.net,
linux-nilfs@...r.kernel.org, linux-btrfs@...r.kernel.org,
linux-xfs@...r.kernel.org, linux-usb@...r.kernel.org,
linux-raid@...r.kernel.org
Subject: [PATCH v5 59/78] dax: Convert lock_slot to XArray
From: Matthew Wilcox <mawilcox@...rosoft.com>
Signed-off-by: Matthew Wilcox <mawilcox@...rosoft.com>
---
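Notes:

lock_slot() now takes the xa_state its caller has already set up,
rather than a (mapping, slot) pair, so it can use xas_load() and
xas_store() in place of the radix tree slot API; each caller declares
an XA_STATE on the stack and takes the xa_lock itself. A minimal
sketch of the calling pattern, using only names that appear in this
patch (dax_locked() tests DAX_ENTRY_LOCK in the value entry):

	/* Sketch only; not part of this patch. */
	XA_STATE(xas, &mapping->pages, index);
	void *entry;

	xas_lock_irq(&xas);
	entry = xas_load(&xas);
	if (xa_is_value(entry) && !dax_locked(entry))
		entry = lock_slot(&xas); /* stores entry back with DAX_ENTRY_LOCK set */
	xas_unlock_irq(&xas);

This also lets dax_unlock_mapping_entry() drop the !entry test from
its WARN_ON_ONCE(): xa_is_value(NULL) is false, so a NULL entry
already fails the xa_is_value() check.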
fs/dax.c | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index dd4674ce48f5..46a4d83b1b46 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -188,12 +188,11 @@ static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
/*
* Mark the given slot as locked. Must be called with xa_lock held.
*/
-static inline void *lock_slot(struct address_space *mapping, void **slot)
+static inline void *lock_slot(struct xa_state *xas)
{
- unsigned long v = xa_to_value(
- radix_tree_deref_slot_protected(slot, &mapping->pages.xa_lock));
+ unsigned long v = xa_to_value(xas_load(xas));
void *entry = xa_mk_value(v | DAX_ENTRY_LOCK);
- radix_tree_replace_slot(&mapping->pages, slot, entry);
+ xas_store(xas, entry);
return entry;
}
@@ -244,7 +243,7 @@ static void dax_unlock_mapping_entry(struct address_space *mapping,
xas_lock_irq(&xas);
entry = xas_load(&xas);
- if (WARN_ON_ONCE(!entry || !xa_is_value(entry) || !dax_locked(entry))) {
+ if (WARN_ON_ONCE(!xa_is_value(entry) || !dax_locked(entry))) {
xas_unlock_irq(&xas);
return;
}
@@ -303,6 +302,7 @@ static void put_unlocked_mapping_entry(struct address_space *mapping,
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
unsigned long size_flag)
{
+ XA_STATE(xas, &mapping->pages, index);
bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
void *entry, **slot;
@@ -341,7 +341,7 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
* Make sure 'entry' remains valid while we drop
* xa_lock.
*/
- entry = lock_slot(mapping, slot);
+ entry = lock_slot(&xas);
}
xa_unlock_irq(&mapping->pages);
@@ -408,7 +408,7 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
xa_unlock_irq(&mapping->pages);
return entry;
}
- entry = lock_slot(mapping, slot);
+ entry = lock_slot(&xas);
out_unlock:
xa_unlock_irq(&mapping->pages);
return entry;
@@ -640,6 +640,7 @@ static int dax_writeback_one(struct block_device *bdev,
pgoff_t index, void *entry)
{
struct radix_tree_root *pages = &mapping->pages;
+ XA_STATE(xas, pages, index);
void *entry2, **slot, *kaddr;
long ret = 0, id;
sector_t sector;
@@ -676,7 +677,7 @@ static int dax_writeback_one(struct block_device *bdev,
if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))
goto put_unlocked;
/* Lock the entry to serialize with page faults */
- entry = lock_slot(mapping, slot);
+ entry = lock_slot(&xas);
/*
* We can clear the tag now but we have to be careful so that concurrent
* dax_writeback_one() calls for the same index cannot finish before we
@@ -1501,8 +1502,9 @@ static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
pfn_t pfn)
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
- void *entry, **slot;
pgoff_t index = vmf->pgoff;
+ XA_STATE(xas, &mapping->pages, index);
+ void *entry, **slot;
int vmf_ret, error;
xa_lock_irq(&mapping->pages);
@@ -1518,7 +1520,7 @@ static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
return VM_FAULT_NOPAGE;
}
radix_tree_tag_set(&mapping->pages, index, PAGECACHE_TAG_DIRTY);
- entry = lock_slot(mapping, slot);
+ entry = lock_slot(&xas);
xa_unlock_irq(&mapping->pages);
switch (pe_size) {
case PE_SIZE_PTE:
--
2.15.1