Message-Id: <20191122235324.17245-4-agruenba@redhat.com>
Date: Sat, 23 Nov 2019 00:53:24 +0100
From: Andreas Gruenbacher <agruenba@...hat.com>
To: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Steven Whitehouse <swhiteho@...hat.com>,
Konstantin Khlebnikov <khlebnikov@...dex-team.ru>,
"Kirill A. Shutemov" <kirill@...temov.name>, linux-mm@...ck.org,
Andrew Morton <akpm@...ux-foundation.org>,
linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org,
Alexander Viro <viro@...iv.linux.org.uk>,
Johannes Weiner <hannes@...xchg.org>, cluster-devel@...hat.com,
Ronnie Sahlberg <lsahlber@...hat.com>,
Steve French <sfrench@...ba.org>,
Bob Peterson <rpeterso@...hat.com>,
Andreas Gruenbacher <agruenba@...hat.com>
Subject: [RFC PATCH 3/3] gfs2: Rework read and page fault locking
Move the glock locking code from the ->readpage and ->readpages
address space operations to the ->read_iter file operation and the
->fault vm operation.

To avoid taking the lock even when an operation can be satisfied out of
the page cache, try the operation without taking the lock first. When
that fails, take the lock and repeat the operation.
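[Editorial note: for readers outside the gfs2 code, here is a minimal
userspace sketch (not gfs2 code) of the pattern described above: first
attempt the read from cached data only, and fall back to taking the
shared lock and repeating the full operation when that attempt cannot
be satisfied. A pthread rwlock stands in for the glock, a small buffer
stands in for the page cache, and the sentinel plays the role of the
-ECANCELED handled in the read_iter hunk below; all names here
(read_cached_only, read_locked, do_read) are invented for illustration
and are not part of the patch.]

/* Illustrative userspace analogy of "try lockless from cache, then
 * lock and retry".  Hypothetical stand-ins only. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NOT_CACHED (-1)   /* "not satisfiable from cache, lock and retry" */

static pthread_rwlock_t glock = PTHREAD_RWLOCK_INITIALIZER; /* stand-in for the glock */
static char cache[64];     /* stand-in for the page cache */
static bool cache_valid;   /* whether the cached copy may be used */

/* Read from the cache only; report NOT_CACHED if the data is absent. */
static int read_cached_only(char *buf, size_t len)
{
        if (!cache_valid)
                return NOT_CACHED;
        memcpy(buf, cache, len < sizeof(cache) ? len : sizeof(cache));
        return 0;
}

/* Full read path: may populate the cache; caller must hold the lock. */
static int read_locked(char *buf, size_t len)
{
        if (!cache_valid) {
                strcpy(cache, "data fetched under the lock");
                cache_valid = true;
        }
        return read_cached_only(buf, len);
}

/* The pattern from this patch: lockless cached attempt, then lock and retry. */
static int do_read(char *buf, size_t len)
{
        int ret = read_cached_only(buf, len);

        if (ret != NOT_CACHED)
                return ret;                     /* satisfied without the lock */

        pthread_rwlock_rdlock(&glock);          /* shared, like LM_ST_SHARED */
        ret = read_locked(buf, len);
        pthread_rwlock_unlock(&glock);
        return ret;
}

int main(void)
{
        char buf[64];

        do_read(buf, sizeof(buf));   /* first call falls back to the locked path */
        printf("1st read: %s\n", buf);
        do_read(buf, sizeof(buf));   /* second call is served from the cache, no lock */
        printf("2nd read: %s\n", buf);
        return 0;
}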
Signed-off-by: Andreas Gruenbacher <agruenba@...hat.com>
---
fs/gfs2/aops.c | 36 ++++++---------------------
fs/gfs2/file.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++++--
2 files changed, 71 insertions(+), 31 deletions(-)
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index b9fe975d7625..4aa6c952eb90 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -515,26 +515,10 @@ static int __gfs2_readpage(void *file, struct page *page)
static int gfs2_readpage(struct file *file, struct page *page)
{
- struct address_space *mapping = page->mapping;
- struct gfs2_inode *ip = GFS2_I(mapping->host);
- struct gfs2_holder gh;
int error;
- unlock_page(page);
- gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
- error = gfs2_glock_nq(&gh);
- if (unlikely(error))
- goto out;
- error = AOP_TRUNCATED_PAGE;
- lock_page(page);
- if (page->mapping == mapping && !PageUptodate(page))
- error = __gfs2_readpage(file, page);
- else
- unlock_page(page);
- gfs2_glock_dq(&gh);
-out:
- gfs2_holder_uninit(&gh);
- if (error && error != AOP_TRUNCATED_PAGE)
+ error = __gfs2_readpage(file, page);
+ if (error)
lock_page(page);
return error;
}
@@ -602,18 +586,12 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
struct inode *inode = mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- struct gfs2_holder gh;
- int ret;
+ int ret = 0;
- gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
- ret = gfs2_glock_nq(&gh);
- if (unlikely(ret))
- goto out_uninit;
- if (!gfs2_is_stuffed(ip))
- ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
- gfs2_glock_dq(&gh);
-out_uninit:
- gfs2_holder_uninit(&gh);
+ if (gfs2_is_stuffed(ip))
+ goto out;
+ ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
+out:
if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
ret = -EIO;
return ret;
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 997b326247e2..207d39996353 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -524,8 +524,34 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
return block_page_mkwrite_return(ret);
}
+static vm_fault_t gfs2_fault(struct vm_fault *vmf)
+{
+ struct inode *inode = file_inode(vmf->vma->vm_file);
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_holder gh;
+ vm_fault_t ret;
+ int err;
+
+ vmf->flags |= FAULT_FLAG_CACHED;
+ ret = filemap_fault(vmf);
+ vmf->flags &= ~FAULT_FLAG_CACHED;
+ if (!(ret & VM_FAULT_RETRY))
+ return ret;
+ gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+ err = gfs2_glock_nq(&gh);
+ if (err) {
+ ret = block_page_mkwrite_return(err);
+ goto out_uninit;
+ }
+ ret = filemap_fault(vmf);
+ gfs2_glock_dq(&gh);
+out_uninit:
+ gfs2_holder_uninit(&gh);
+ return ret;
+}
+
static const struct vm_operations_struct gfs2_vm_ops = {
- .fault = filemap_fault,
+ .fault = gfs2_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = gfs2_page_mkwrite,
};
@@ -778,15 +804,51 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
+ struct gfs2_inode *ip;
+ struct gfs2_holder gh;
+ size_t written = 0;
ssize_t ret;
+ gfs2_holder_mark_uninitialized(&gh);
if (iocb->ki_flags & IOCB_DIRECT) {
ret = gfs2_file_direct_read(iocb, to);
if (likely(ret != -ENOTBLK))
return ret;
iocb->ki_flags &= ~IOCB_DIRECT;
}
- return generic_file_read_iter(iocb, to);
+ iocb->ki_flags |= IOCB_CACHED;
+ ret = generic_file_read_iter(iocb, to);
+ iocb->ki_flags &= ~IOCB_CACHED;
+ if (ret >= 0) {
+ if (!iov_iter_count(to))
+ return ret;
+ written = ret;
+ } else {
+ switch(ret) {
+ case -EAGAIN:
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ return ret;
+ break;
+ case -ECANCELED:
+ break;
+ default:
+ return ret;
+ }
+ }
+ ip = GFS2_I(iocb->ki_filp->f_mapping->host);
+ gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+ ret = gfs2_glock_nq(&gh);
+ if (ret)
+ goto out_uninit;
+ ret = generic_file_read_iter(iocb, to);
+ if (ret > 0)
+ written += ret;
+ if (gfs2_holder_initialized(&gh))
+ gfs2_glock_dq(&gh);
+out_uninit:
+ if (gfs2_holder_initialized(&gh))
+ gfs2_holder_uninit(&gh);
+ return written ? written : ret;
}
/**
--
2.20.1