diff --git a/fs/dax.c b/fs/dax.c
index 26798cdc6789..2913a82dd68d 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -327,7 +327,7 @@ EXPORT_SYMBOL_GPL(dax_do_io);
  * DAX radix tree locking
  */
 struct exceptional_entry_key {
-	struct radix_tree_root *root;
+	struct address_space *mapping;
 	unsigned long index;
 };
 
@@ -343,7 +343,8 @@ static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
 	struct wait_exceptional_entry_queue *ewait =
 		container_of(wait, struct wait_exceptional_entry_queue, wait);
 
-	if (key->root != ewait->key.root || key->index != ewait->key.index)
+	if (key->mapping != ewait->key.mapping ||
+	    key->index != ewait->key.index)
 		return 0;
 	return autoremove_wake_function(wait, mode, sync, NULL);
 }
@@ -489,8 +490,8 @@ restart:
 	return ret;
 }
 
-static void wake_mapping_entry_waiter(struct address_space *mapping,
-				      pgoff_t index, bool wake_all)
+void dax_wake_mapping_entry_waiter(struct address_space *mapping,
+				   pgoff_t index, bool wake_all)
 {
 	wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);
 
@@ -503,7 +504,7 @@ static void wake_mapping_entry_waiter(struct address_space *mapping,
 	if (waitqueue_active(wq)) {
 		struct exceptional_entry_key key;
 
-		key.root = &mapping->page_tree;
+		key.mapping = mapping;
 		key.index = index;
 		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
 	}
@@ -522,7 +523,7 @@ static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
 	}
 	unlock_slot(mapping, slot);
 	spin_unlock_irq(&mapping->tree_lock);
-	wake_mapping_entry_waiter(mapping, index, false);
+	dax_wake_mapping_entry_waiter(mapping, index, false);
 }
 
 static void put_locked_mapping_entry(struct address_space *mapping,
@@ -547,7 +548,7 @@ static void put_unlocked_mapping_entry(struct address_space *mapping,
 		return;
 
 	/* We have to wake up next waiter for the radix tree entry lock */
-	wake_mapping_entry_waiter(mapping, index, false);
+	dax_wake_mapping_entry_waiter(mapping, index, false);
 }
 
 /*
@@ -571,7 +572,7 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
 	radix_tree_delete(&mapping->page_tree, index);
 	mapping->nrexceptional--;
 	spin_unlock_irq(&mapping->tree_lock);
-	wake_mapping_entry_waiter(mapping, index, true);
+	dax_wake_mapping_entry_waiter(mapping, index, true);
 
 	return 1;
 }
diff --git a/include/linux/dax.h b/include/linux/dax.h
index be40ec13d469..d3d788b44d66 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -17,6 +17,8 @@ int dax_truncate_page(struct inode *, loff_t from, get_block_t);
 int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
 int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
+void dax_wake_mapping_entry_waiter(struct address_space *mapping,
+		pgoff_t index, bool wake_all);
 
 #ifdef CONFIG_FS_DAX
 struct page *read_dax_sector(struct block_device *bdev, sector_t n);
diff --git a/mm/filemap.c b/mm/filemap.c
index 3effd5c8f2f6..6d42525a68eb 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -608,6 +608,9 @@ static int page_cache_tree_insert(struct address_space *mapping,
 			WARN_ON_ONCE(p !=
 				(void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
 					 RADIX_DAX_ENTRY_LOCK));
+			/* Wakeup waiters for exceptional entry lock */
+			dax_wake_mapping_entry_waiter(mapping, page->index,
+						      false);
 		}
 	}
 	radix_tree_replace_slot(slot, page);