Message-Id: <20210122160140.223228-6-willy@infradead.org>
Date: Fri, 22 Jan 2021 16:01:27 +0000
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-fsdevel@...r.kernel.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
Kent Overstreet <kent.overstreet@...il.com>,
Christoph Hellwig <hch@....de>
Subject: [PATCH v5 05/18] mm/filemap: Pass a sleep state to put_and_wait_on_page_locked

This is prep work for the next patch, but I think at least one of the
current callers would prefer a killable sleep to an uninterruptible one.
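
For illustration, a hypothetical killable caller (none is converted in
this patch; the call-site shape and error handling below are assumptions)
could check the new return value like so:

	if (!get_page_unless_zero(page))
		goto out;
	spin_unlock(ptl);
	/* Our reference is consumed; never touch @page after this call. */
	if (put_and_wait_on_page_locked(page, TASK_KILLABLE))
		return -EINTR;	/* woken early by a fatal signal */
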
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
Reviewed-by: Kent Overstreet <kent.overstreet@...il.com>
Reviewed-by: Christoph Hellwig <hch@....de>
---
include/linux/pagemap.h | 3 +--
mm/filemap.c | 7 +++++--
mm/huge_memory.c | 4 ++--
mm/migrate.c | 4 ++--
4 files changed, 10 insertions(+), 8 deletions(-)
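
A note on why the helper drops the reference itself rather than letting
callers put_page() around the wait (illustrative sketch only, not part of
this patch): neither open-coded ordering is safe.

	/* Waiting first pins the page across the sleep, which is
	 * exactly the migration stall the callers want to avoid: */
	wait_on_page_locked(page);
	put_page(page);

	/* Putting first means the wait may dereference a page that
	 * has already been freed: */
	put_page(page);
	wait_on_page_locked(page);

put_and_wait_on_page_locked() avoids both by queueing the waiter first
and only then dropping the reference inside the wait machinery (the DROP
flavour of wait_on_page_bit_common()).
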
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 4317f34866c75..20225b067583a 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -681,8 +681,7 @@ static inline int wait_on_page_locked_killable(struct page *page)
 	return wait_on_page_bit_killable(compound_head(page), PG_locked);
 }
 
-extern void put_and_wait_on_page_locked(struct page *page);
-
+int put_and_wait_on_page_locked(struct page *page, int state);
 void wait_on_page_writeback(struct page *page);
 extern void end_page_writeback(struct page *page);
 void wait_for_stable_page(struct page *page);
diff --git a/mm/filemap.c b/mm/filemap.c
index c5d8b6baf656a..c71cd95e5372b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1379,20 +1379,23 @@ static int wait_on_page_locked_async(struct page *page,
 /**
  * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
  * @page: The page to wait for.
+ * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
  *
  * The caller should hold a reference on @page. They expect the page to
  * become unlocked relatively soon, but do not wish to hold up migration
  * (for example) by holding the reference while waiting for the page to
  * come unlocked. After this function returns, the caller should not
  * dereference @page.
+ *
+ * Return: 0 if the page was unlocked or -EINTR if interrupted by a signal.
  */
-void put_and_wait_on_page_locked(struct page *page)
+int put_and_wait_on_page_locked(struct page *page, int state)
 {
 	wait_queue_head_t *q;
 
 	page = compound_head(page);
 	q = page_waitqueue(page);
-	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, DROP);
+	return wait_on_page_bit_common(q, page, PG_locked, state, DROP);
 }
 
 /**
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5aa045e3b5dc1..1764ecd5beba5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1441,7 +1441,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 		if (!get_page_unless_zero(page))
 			goto out_unlock;
 		spin_unlock(vmf->ptl);
-		put_and_wait_on_page_locked(page);
+		put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
 		goto out;
 	}
 
@@ -1477,7 +1477,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 		if (!get_page_unless_zero(page))
 			goto out_unlock;
 		spin_unlock(vmf->ptl);
-		put_and_wait_on_page_locked(page);
+		put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
 		goto out;
 	}
 
diff --git a/mm/migrate.c b/mm/migrate.c
index a3e1acc72ad78..62b81d5257aaa 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -331,7 +331,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
 	if (!get_page_unless_zero(page))
 		goto out;
 	pte_unmap_unlock(ptep, ptl);
-	put_and_wait_on_page_locked(page);
+	put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
 	return;
 out:
 	pte_unmap_unlock(ptep, ptl);
@@ -365,7 +365,7 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
 	if (!get_page_unless_zero(page))
 		goto unlock;
 	spin_unlock(ptl);
-	put_and_wait_on_page_locked(page);
+	put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
 	return;
 unlock:
 	spin_unlock(ptl);
--
2.29.2