Message-Id: <20200522202311.10959-4-axboe@kernel.dk>
Date: Fri, 22 May 2020 14:23:03 -0600
From: Jens Axboe <axboe@...nel.dk>
To: io-uring@...r.kernel.org
Cc: linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, Jens Axboe <axboe@...nel.dk>
Subject: [PATCH 03/11] mm: add support for async page locking

Normally, waiting for a page to become unlocked, or locking the page,
requires waiting for IO to complete. Add support for lock_page_async()
and wait_on_page_locked_async(), which are callback based instead. This
allows a caller to get notified when a page becomes unlocked, rather
than having to wait for it.

We use the iocb->private field to pass in the data necessary for this
to happen. struct wait_page_key is made public, and we define struct
wait_page_async as the interface between the caller and the core.
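
As a rough sketch of the intended caller side (illustration only, not
part of this patch: the demo_* names and the retry hook are made up,
and the real consumer gets wired up later in this series), the owner of
the kiocb supplies a wake function and a queue entry along these lines:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/wait.h>

static int demo_page_wake_fn(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *key)
{
	struct wait_page_async *wpa = container_of(wait,
					struct wait_page_async, wait);
	struct wait_page_key *wpk = key;

	/* only react to the page/bit this entry was queued for */
	if (wpk->page != wpa->key.page || wpk->bit_nr != wpa->key.bit_nr)
		return 0;

	list_del_init(&wait->entry);
	/* from here, punt the retry of the original IO to task context */
	return 1;
}

static void demo_prep_async_wait(struct kiocb *kiocb,
				 struct wait_page_async *wpa)
{
	init_waitqueue_func_entry(&wpa->wait, demo_page_wake_fn);
	kiocb->private = wpa;
	kiocb->ki_flags |= IOCB_WAITQ;
}
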
Signed-off-by: Jens Axboe <axboe@...nel.dk>
---
 include/linux/fs.h      |  2 ++
 include/linux/pagemap.h | 22 +++++++++++++++++++++
 mm/filemap.c            | 44 ++++++++++++++++++++++++++++++++++-------
 3 files changed, 61 insertions(+), 7 deletions(-)
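
Building on the sketch above (again hypothetical, with made-up demo_*
names), a caller that needs the page lock would then do something like
the following, propagating -EIOCBQUEUED instead of blocking:

static int demo_lock_page(struct kiocb *kiocb, struct page *page)
{
	struct wait_page_async *wpa = kiocb->private;

	/* IOCB_WAITQ tells us that ->private really holds our wait entry */
	if (!(kiocb->ki_flags & IOCB_WAITQ))
		return -EAGAIN;

	/*
	 * Trylock first; on contention the wait entry is queued and
	 * -EIOCBQUEUED comes back, and demo_page_wake_fn() runs once
	 * the page is unlocked.
	 */
	return lock_page_async(page, wpa);
}
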
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7e84d823c6a8..82b989695ab9 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -314,6 +314,8 @@ enum rw_hint {
 #define IOCB_SYNC		(1 << 5)
 #define IOCB_WRITE		(1 << 6)
 #define IOCB_NOWAIT		(1 << 7)
+/* iocb->private holds wait_page_async struct */
+#define IOCB_WAITQ		(1 << 8)
 
 struct kiocb {
 	struct file *ki_filp;
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a8f7bd8ea1c6..39af9f890866 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -456,8 +456,21 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 	return pgoff;
 }
 
+/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
+struct wait_page_key {
+	struct page *page;
+	int bit_nr;
+	int page_match;
+};
+
+struct wait_page_async {
+	struct wait_queue_entry wait;
+	struct wait_page_key key;
+};
+
 extern void __lock_page(struct page *page);
 extern int __lock_page_killable(struct page *page);
+extern int __lock_page_async(struct page *page, struct wait_page_async *wait);
 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 				unsigned int flags);
 extern void unlock_page(struct page *page);
@@ -494,6 +507,15 @@ static inline int lock_page_killable(struct page *page)
 	return 0;
 }
 
+static inline int lock_page_async(struct page *page,
+				  struct wait_page_async *wait)
+{
+	int ret = 0;
+	if (!trylock_page(page))
+		ret = __lock_page_async(page, wait);
+	return ret;
+}
+
 /*
  * lock_page_or_retry - Lock the page, unless this would block and the
  * caller indicated that it can handle a retry.
diff --git a/mm/filemap.c b/mm/filemap.c
index 80747f1377d5..0bc77f431bea 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -990,13 +990,6 @@ void __init pagecache_init(void)
 	page_writeback_init();
 }
 
-/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
-struct wait_page_key {
-	struct page *page;
-	int bit_nr;
-	int page_match;
-};
-
 struct wait_page_queue {
 	struct page *page;
 	int bit_nr;
@@ -1210,6 +1203,38 @@ int wait_on_page_bit_killable(struct page *page, int bit_nr)
 }
 EXPORT_SYMBOL(wait_on_page_bit_killable);
 
+static int __wait_on_page_locked_async(struct page *page,
+				       struct wait_page_async *wait)
+{
+	struct wait_queue_head *q = page_waitqueue(page);
+	int ret = 0;
+
+	wait->key.page = page;
+	wait->key.bit_nr = PG_locked;
+
+	spin_lock_irq(&q->lock);
+	if (PageLocked(page)) {
+		__add_wait_queue_entry_tail(q, &wait->wait);
+		SetPageWaiters(page);
+		ret = -EIOCBQUEUED;
+	}
+	spin_unlock_irq(&q->lock);
+	return ret;
+}
+
+static int wait_on_page_locked_async(struct page *page,
+				     struct wait_page_async *wait)
+{
+	int ret;
+	if (!PageLocked(page))
+		return 0;
+	ret = __wait_on_page_locked_async(compound_head(page), wait);
+	if (ret == -EIOCBQUEUED && !PageLocked(page))
+		ret = 0;
+	return ret;
+}
+
+
 /**
  * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
  * @page: The page to wait for.
@@ -1372,6 +1397,11 @@ int __lock_page_killable(struct page *__page)
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
 
+int __lock_page_async(struct page *page, struct wait_page_async *wait)
+{
+	return wait_on_page_locked_async(page, wait);
+}
+
 /*
  * Return values:
  * 1 - page is locked; mmap_sem is still held.
--
2.26.2