From c3d67dc7543abc03161f6cf357039ad9e56783ca Mon Sep 17 00:00:00 2001
From: Jan Kara
Date: Mon, 12 Dec 2016 16:32:23 +0100
Subject: [PATCH] dax: Fix sleep in atomic context in grab_mapping_entry()

Commit 7b5b8c9c4ac9 ("dax: add struct iomap based DAX PMD support")
introduced unmapping of page tables when a huge page needs to be split
in grab_mapping_entry(). However, the unmapping happens after the
radix_tree_preload() call, which disables preemption, and thus
unmap_mapping_range() tries to acquire i_mmap_lock in atomic context,
which is a bug. Fix the problem by moving the unmapping before the
radix_tree_preload() call.

Fixes: 7b5b8c9c4ac9 ("dax: add struct iomap based DAX PMD support")
Signed-off-by: Jan Kara
---
 fs/dax.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 51b03e91d3e2..5c74f60d0a50 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -351,14 +351,6 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
 		}
 		spin_unlock_irq(&mapping->tree_lock);
 
-		err = radix_tree_preload(
-				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
-		if (err) {
-			if (pmd_downgrade)
-				put_locked_mapping_entry(mapping, index, entry);
-			return ERR_PTR(err);
-		}
-
 		/*
 		 * Besides huge zero pages the only other thing that gets
 		 * downgraded are empty entries which don't need to be
@@ -368,6 +360,13 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
 			unmap_mapping_range(mapping,
 				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
 
+		err = radix_tree_preload(
+				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
+		if (err) {
+			if (pmd_downgrade)
+				put_locked_mapping_entry(mapping, index, entry);
+			return ERR_PTR(err);
+		}
 		spin_lock_irq(&mapping->tree_lock);
 
 		if (pmd_downgrade) {
-- 
2.10.2
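
For reference, the ordering constraint the patch enforces, as a sketch only:
it is not the exact fs/dax.c code, and the helper name preload_after_unmap()
is made up for illustration. radix_tree_preload() returns with preemption
disabled on success, so anything that can sleep, such as unmap_mapping_range()
taking i_mmap_lock, has to run before it; the atomic section then lasts until
radix_tree_preload_end().

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/radix-tree.h>

/* Illustrative helper, not from the patch: shows the required ordering. */
static void *preload_after_unmap(struct address_space *mapping, pgoff_t index)
{
	int err;

	/* May sleep (takes i_mmap_lock), so do it while sleeping is allowed. */
	unmap_mapping_range(mapping, (index << PAGE_SHIFT) & PMD_MASK,
			    PMD_SIZE, 0);

	/* On success, preemption stays disabled until radix_tree_preload_end(). */
	err = radix_tree_preload(mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
	if (err)
		return ERR_PTR(err);

	spin_lock_irq(&mapping->tree_lock);
	/* ... radix tree insertion goes here; nothing that can sleep ... */
	spin_unlock_irq(&mapping->tree_lock);

	radix_tree_preload_end();	/* re-enables preemption */
	return NULL;
}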