Message-Id: <20190724042518.14363-11-jhubbard@nvidia.com>
Date: Tue, 23 Jul 2019 21:25:16 -0700
From: john.hubbard@...il.com
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Alexander Viro <viro@...iv.linux.org.uk>,
Anna Schumaker <anna.schumaker@...app.com>,
"David S . Miller" <davem@...emloft.net>,
Dominique Martinet <asmadeus@...ewreck.org>,
Eric Van Hensbergen <ericvh@...il.com>,
Jason Gunthorpe <jgg@...pe.ca>,
Jason Wang <jasowang@...hat.com>, Jens Axboe <axboe@...nel.dk>,
Latchesar Ionkov <lucho@...kov.net>,
"Michael S . Tsirkin" <mst@...hat.com>,
Miklos Szeredi <miklos@...redi.hu>,
Trond Myklebust <trond.myklebust@...merspace.com>,
Christoph Hellwig <hch@....de>,
Matthew Wilcox <willy@...radead.org>, linux-mm@...ck.org,
LKML <linux-kernel@...r.kernel.org>, ceph-devel@...r.kernel.org,
kvm@...r.kernel.org, linux-block@...r.kernel.org,
linux-cifs@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-nfs@...r.kernel.org, linux-rdma@...r.kernel.org,
netdev@...r.kernel.org, samba-technical@...ts.samba.org,
v9fs-developer@...ts.sourceforge.net,
virtualization@...ts.linux-foundation.org,
Jérôme Glisse <jglisse@...hat.com>,
John Hubbard <jhubbard@...dia.com>, Jan Kara <jack@...e.cz>,
Dan Williams <dan.j.williams@...el.com>,
Johannes Thumshirn <jthumshirn@...e.de>,
Ming Lei <ming.lei@...hat.com>,
Dave Chinner <david@...morbit.com>,
Boaz Harrosh <boaz@...xistor.com>,
"Yan, Zheng" <zyan@...hat.com>, Sage Weil <sage@...hat.com>,
Ilya Dryomov <idryomov@...il.com>
Subject: [PATCH 10/12] fs/ceph: convert put_page() to put_user_page*()
From: Jérôme Glisse <jglisse@...hat.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Changes from Jérôme's original patch:
* Use the enhanced put_user_pages_dirty_lock().
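
(For reference, the enhanced helper takes a make_dirty argument, so the
set_page_dirty_lock() + put_page() pair collapses into a single call.
Its signature, as of this series:

    void put_user_pages_dirty_lock(struct page **pages,
                                   unsigned long npages,
                                   bool make_dirty);

With make_dirty == false it behaves like put_user_pages().)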
Signed-off-by: Jérôme Glisse <jglisse@...hat.com>
Signed-off-by: John Hubbard <jhubbard@...dia.com>
Cc: linux-fsdevel@...r.kernel.org
Cc: linux-block@...r.kernel.org
Cc: linux-mm@...ck.org
Cc: ceph-devel@...r.kernel.org
Cc: Jan Kara <jack@...e.cz>
Cc: Dan Williams <dan.j.williams@...el.com>
Cc: Alexander Viro <viro@...iv.linux.org.uk>
Cc: Johannes Thumshirn <jthumshirn@...e.de>
Cc: Christoph Hellwig <hch@....de>
Cc: Jens Axboe <axboe@...nel.dk>
Cc: Ming Lei <ming.lei@...hat.com>
Cc: Dave Chinner <david@...morbit.com>
Cc: Jason Gunthorpe <jgg@...pe.ca>
Cc: Matthew Wilcox <willy@...radead.org>
Cc: Boaz Harrosh <boaz@...xistor.com>
Cc: "Yan, Zheng" <zyan@...hat.com>
Cc: Sage Weil <sage@...hat.com>
Cc: Ilya Dryomov <idryomov@...il.com>
---
fs/ceph/file.c | 62 ++++++++++++++++++++++++++++++++++++++------------
1 file changed, 48 insertions(+), 14 deletions(-)
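Note for reviewers: put_bvecs() cannot tell from a bio_vec alone whether
its pages were pinned by get_user_pages*() or came from a kernel-internal
iterator, so the callers record that fact at the point where the pages
are acquired and plumb it down as "from_gup". A rough sketch of the
caller-side pattern, relying on the iov_iter_get_pages_use_gup() helper
added earlier in this series:

    bool from_gup = iov_iter_get_pages_use_gup(iter);

    /* ... pin pages into bvecs via iter_get_bvecs_alloc() ... */

    /* release with the routine that matches how they were acquired */
    put_bvecs(bvecs, num_pages, should_dirty, from_gup);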
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 685a03cc4b77..c628a1f96978 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -158,18 +158,26 @@ static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
return bytes;
}
-static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
+static void put_bvecs(struct bio_vec *bv, int num_bvecs, bool should_dirty,
+ bool from_gup)
{
int i;
+
for (i = 0; i < num_bvecs; i++) {
- if (bvecs[i].bv_page) {
+ if (!bv[i].bv_page)
+ continue;
+
+ if (from_gup) {
+ put_user_pages_dirty_lock(&bv[i].bv_page, 1,
+ should_dirty);
+ } else {
if (should_dirty)
- set_page_dirty_lock(bvecs[i].bv_page);
- put_page(bvecs[i].bv_page);
+ set_page_dirty_lock(bv[i].bv_page);
+ put_page(bv[i].bv_page);
}
}
- kvfree(bvecs);
+ kvfree(bv);
}
/*
@@ -730,6 +738,7 @@ struct ceph_aio_work {
};
static void ceph_aio_retry_work(struct work_struct *work);
+static void ceph_aio_from_gup_retry_work(struct work_struct *work);
static void ceph_aio_complete(struct inode *inode,
struct ceph_aio_request *aio_req)
@@ -774,7 +783,7 @@ static void ceph_aio_complete(struct inode *inode,
kfree(aio_req);
}
-static void ceph_aio_complete_req(struct ceph_osd_request *req)
+static void _ceph_aio_complete_req(struct ceph_osd_request *req, bool from_gup)
{
int rc = req->r_result;
struct inode *inode = req->r_inode;
@@ -793,7 +802,9 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
if (aio_work) {
- INIT_WORK(&aio_work->work, ceph_aio_retry_work);
+ INIT_WORK(&aio_work->work, from_gup ?
+ ceph_aio_from_gup_retry_work :
+ ceph_aio_retry_work);
aio_work->req = req;
queue_work(ceph_inode_to_client(inode)->inode_wq,
&aio_work->work);
@@ -830,7 +841,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
}
put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
- aio_req->should_dirty);
+ aio_req->should_dirty, from_gup);
ceph_osdc_put_request(req);
if (rc < 0)
@@ -840,7 +851,17 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
return;
}
-static void ceph_aio_retry_work(struct work_struct *work)
+static void ceph_aio_complete_req(struct ceph_osd_request *req)
+{
+ _ceph_aio_complete_req(req, false);
+}
+
+static void ceph_aio_from_gup_complete_req(struct ceph_osd_request *req)
+{
+ _ceph_aio_complete_req(req, true);
+}
+
+static void _ceph_aio_retry_work(struct work_struct *work, bool from_gup)
{
struct ceph_aio_work *aio_work =
container_of(work, struct ceph_aio_work, work);
@@ -891,7 +912,8 @@ static void ceph_aio_retry_work(struct work_struct *work)
ceph_osdc_put_request(orig_req);
- req->r_callback = ceph_aio_complete_req;
+ req->r_callback = from_gup ? ceph_aio_from_gup_complete_req :
+ ceph_aio_complete_req;
req->r_inode = inode;
req->r_priv = aio_req;
@@ -899,13 +921,23 @@ static void ceph_aio_retry_work(struct work_struct *work)
out:
if (ret < 0) {
req->r_result = ret;
- ceph_aio_complete_req(req);
+ _ceph_aio_complete_req(req, from_gup);
}
ceph_put_snap_context(snapc);
kfree(aio_work);
}
+static void ceph_aio_retry_work(struct work_struct *work)
+{
+ _ceph_aio_retry_work(work, false);
+}
+
+static void ceph_aio_from_gup_retry_work(struct work_struct *work)
+{
+ _ceph_aio_retry_work(work, true);
+}
+
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
struct ceph_snap_context *snapc,
@@ -927,6 +959,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
loff_t pos = iocb->ki_pos;
bool write = iov_iter_rw(iter) == WRITE;
bool should_dirty = !write && iter_is_iovec(iter);
+ bool from_gup = iov_iter_get_pages_use_gup(iter);
if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
return -EROFS;
@@ -1023,7 +1056,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
aio_req->num_reqs++;
atomic_inc(&aio_req->pending_reqs);
- req->r_callback = ceph_aio_complete_req;
+ req->r_callback = !from_gup ? ceph_aio_complete_req :
+ ceph_aio_from_gup_complete_req;
req->r_inode = inode;
req->r_priv = aio_req;
list_add_tail(&req->r_private_item, &aio_req->osd_reqs);
@@ -1054,7 +1088,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
len = ret;
}
- put_bvecs(bvecs, num_pages, should_dirty);
+ put_bvecs(bvecs, num_pages, should_dirty, from_gup);
ceph_osdc_put_request(req);
if (ret < 0)
break;
@@ -1093,7 +1127,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
req, false);
if (ret < 0) {
req->r_result = ret;
- ceph_aio_complete_req(req);
+ _ceph_aio_complete_req(req, from_gup);
}
}
return -EIOCBQUEUED;
--
2.22.0