[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20190411210834.4105-12-jglisse@redhat.com>
Date: Thu, 11 Apr 2019 17:08:30 -0400
From: jglisse@...hat.com
To: linux-kernel@...r.kernel.org
Cc: Jérôme Glisse <jglisse@...hat.com>,
linux-fsdevel@...r.kernel.org, linux-block@...r.kernel.org,
linux-mm@...ck.org, John Hubbard <jhubbard@...dia.com>,
Jan Kara <jack@...e.cz>,
Dan Williams <dan.j.williams@...el.com>,
Alexander Viro <viro@...iv.linux.org.uk>,
Johannes Thumshirn <jthumshirn@...e.de>,
Christoph Hellwig <hch@....de>, Jens Axboe <axboe@...nel.dk>,
Ming Lei <ming.lei@...hat.com>,
Dave Chinner <david@...morbit.com>,
Jason Gunthorpe <jgg@...pe.ca>,
Matthew Wilcox <willy@...radead.org>
Subject: [PATCH v1 11/15] block: make sure bio_add_page*() knows pages that are coming from GUP
From: Jérôme Glisse <jglisse@...hat.com>
When we get a page reference through get_user_page*() we want to keep
track of that, so pass that information down to bio_add_page*().
Signed-off-by: Jérôme Glisse <jglisse@...hat.com>
Cc: linux-fsdevel@...r.kernel.org
Cc: linux-block@...r.kernel.org
Cc: linux-mm@...ck.org
Cc: John Hubbard <jhubbard@...dia.com>
Cc: Jan Kara <jack@...e.cz>
Cc: Dan Williams <dan.j.williams@...el.com>
Cc: Alexander Viro <viro@...iv.linux.org.uk>
Cc: Johannes Thumshirn <jthumshirn@...e.de>
Cc: Christoph Hellwig <hch@....de>
Cc: Jens Axboe <axboe@...nel.dk>
Cc: Ming Lei <ming.lei@...hat.com>
Cc: Dave Chinner <david@...morbit.com>
Cc: Jason Gunthorpe <jgg@...pe.ca>
Cc: Matthew Wilcox <willy@...radead.org>
---
block/bio.c | 34 +++++++++++++++++++++++++++-------
1 file changed, 27 insertions(+), 7 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 73227ede9a0a..197b70426aa6 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -708,7 +708,10 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
* cannot add the page
*/
bvec = &bio->bi_io_vec[bio->bi_vcnt];
- bvec_set_page(bvec, page);
+ if (is_gup)
+ bvec_set_gup_page(bvec, page);
+ else
+ bvec_set_page(bvec, page);
bvec->bv_len = len;
bvec->bv_offset = offset;
bio->bi_vcnt++;
@@ -793,6 +796,7 @@ EXPORT_SYMBOL_GPL(__bio_try_merge_page);
* @page: page to add
* @len: length of the data to add
* @off: offset of the data in @page
+ * @is_gup: was the page referenced through GUP (get_user_page*)
*
* Add the data at @page + @off to @bio as a new bvec. The caller must ensure
* that @bio has space for another bvec.
@@ -805,7 +809,10 @@ void __bio_add_page(struct bio *bio, struct page *page,
WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
WARN_ON_ONCE(bio_full(bio));
- bvec_set_page(bv, page);
+ if (is_gup)
+ bvec_set_gup_page(bv, page);
+ else
+ bvec_set_page(bv, page);
bv->bv_offset = off;
bv->bv_len = len;
@@ -820,6 +827,7 @@ EXPORT_SYMBOL_GPL(__bio_add_page);
* @page: page to add
* @len: vec entry length
* @offset: vec entry offset
+ * @is_gup: was the page referenced through GUP (get_user_page*)
*
* Attempt to add a page to the bio_vec maplist. This will only fail
* if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
@@ -830,7 +838,7 @@ int bio_add_page(struct bio *bio, struct page *page,
if (!__bio_try_merge_page(bio, page, len, offset, false)) {
if (bio_full(bio))
return 0;
- __bio_add_page(bio, page, len, offset, false);
+ __bio_add_page(bio, page, len, offset, is_gup);
}
return len;
}
@@ -885,6 +893,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
ssize_t size, left;
unsigned len, i;
size_t offset;
+ bool gup;
/*
* Move page array up in the allocated memory for the bio vecs as far as
@@ -894,6 +903,8 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
+ /* Is iov_iter_get_pages() using GUP ? */
+ gup = iov_iter_get_pages_use_gup(iter);
size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
if (unlikely(size <= 0))
return size ? size : -EFAULT;
@@ -902,7 +913,8 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
struct page *page = pages[i];
len = min_t(size_t, PAGE_SIZE - offset, left);
- if (WARN_ON_ONCE(bio_add_page(bio, page, len, offset, false) != len))
+ if (WARN_ON_ONCE(bio_add_page(bio, page, len,
+ offset, gup) != len))
return -EINVAL;
offset = 0;
}
@@ -1372,6 +1384,10 @@ struct bio *bio_map_user_iov(struct request_queue *q,
ssize_t bytes;
size_t offs, added = 0;
int npages;
+ bool gup;
+
+ /* Is iov_iter_get_pages() using GUP ? */
+ gup = iov_iter_get_pages_use_gup(iter);
bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
if (unlikely(bytes <= 0)) {
@@ -1393,7 +1409,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
if (n > bytes)
n = bytes;
- if (!bio_add_pc_page(q, bio, page, n, offs, false))
+ if (!bio_add_pc_page(q, bio, page, n, offs, gup))
break;
/*
@@ -1412,8 +1428,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
/*
* release the pages we didn't map into the bio, if any
*/
- while (j < npages)
- put_page(pages[j++]);
+ while (j < npages) {
+ if (gup)
+ put_user_page(pages[j++]);
+ else
+ put_page(pages[j++]);
+ }
kvfree(pages);
/* couldn't stuff something into bio? */
if (bytes)
--
2.20.1
Powered by blists - more mailing lists