Message-ID: <20200629150323.GA3293033@google.com>
Date: Mon, 29 Jun 2020 08:03:23 -0700
From: Jaegeuk Kim <jaegeuk@...nel.org>
To: linux-kernel@...r.kernel.org,
linux-f2fs-devel@...ts.sourceforge.net, kernel-team@...roid.com
Subject: Re: [PATCH v2] f2fs: avoid readahead race condition
If two readahead threads enter readpages with the same offset, every read IO
is split and issued to the disk separately, which lowers bandwidth.
This patch tries to avoid redundant readahead calls.
Signed-off-by: Jaegeuk Kim <jaegeuk@...nel.org>
---
v2:
- add missing code to bypass read
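
For reviewers, the gist of the approach: the first readahead pass records its
starting page index in the per-inode ra_offset, a concurrent pass that sees the
same index drops its pages instead of splitting the IO, and the marker is reset
to -1 when the winning pass finishes. Below is a minimal user-space sketch of
that idea only; it is not the kernel code, and struct demo_inode,
ra_should_issue() and ra_done() are made-up names. The real code keys off
page_index() of the last page in the list and, like this sketch, tolerates the
harmless race on the unlocked marker.

/*
 * Minimal user-space sketch of the readahead dedup idea (illustration only).
 */
#include <stdbool.h>
#include <stdio.h>

#define RA_NONE	(-1L)

struct demo_inode {
	long ra_offset;		/* offset of an in-flight readahead, or RA_NONE */
};

/* Return true if the caller should actually issue readahead for @index. */
static bool ra_should_issue(struct demo_inode *inode, long index)
{
	if (inode->ra_offset == index)
		return false;		/* another pass already covers this offset */
	inode->ra_offset = index;	/* claim this readahead window */
	return true;
}

/* Mark the readahead pass as finished so later passes are allowed again. */
static void ra_done(struct demo_inode *inode)
{
	inode->ra_offset = RA_NONE;
}

int main(void)
{
	struct demo_inode inode = { .ra_offset = RA_NONE };

	/* First caller at offset 128 wins and issues the readahead. */
	printf("first  caller issues: %d\n", ra_should_issue(&inode, 128));
	/* Second caller at the same offset is dropped while it is in flight. */
	printf("second caller issues: %d\n", ra_should_issue(&inode, 128));
	ra_done(&inode);
	return 0;
}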
fs/f2fs/data.c | 18 +++++++++++++++++-
fs/f2fs/f2fs.h | 1 +
fs/f2fs/super.c | 2 ++
3 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index d6094b9f3916..9b69a159cc6c 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2403,6 +2403,7 @@ int f2fs_mpage_readpages(struct address_space *mapping,
 #endif
 	unsigned max_nr_pages = nr_pages;
 	int ret = 0;
+	bool drop_ra = false;
 
 	map.m_pblk = 0;
 	map.m_lblk = 0;
@@ -2413,13 +2414,25 @@ int f2fs_mpage_readpages(struct address_space *mapping,
 	map.m_seg_type = NO_CHECK_TYPE;
 	map.m_may_create = false;
 
+	/*
+	 * Two readahead threads for the same address range can race with each
+	 * other and fragment sequential read IOs. So let them avoid each other.
+	 */
+	if (pages && is_readahead) {
+		page = list_last_entry(pages, struct page, lru);
+		if (F2FS_I(inode)->ra_offset == page_index(page))
+			drop_ra = true;
+		else
+			F2FS_I(inode)->ra_offset = page_index(page);
+	}
+
 	for (; nr_pages; nr_pages--) {
 		if (pages) {
 			page = list_last_entry(pages, struct page, lru);
 
 			prefetchw(&page->flags);
 			list_del(&page->lru);
-			if (add_to_page_cache_lru(page, mapping,
+			if (drop_ra || add_to_page_cache_lru(page, mapping,
						page_index(page),
						readahead_gfp_mask(mapping)))
				goto next_page;
@@ -2484,6 +2497,9 @@ int f2fs_mpage_readpages(struct address_space *mapping,
 	BUG_ON(pages && !list_empty(pages));
 	if (bio)
 		__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
+
+	if (pages && is_readahead && !drop_ra)
+		F2FS_I(inode)->ra_offset = -1;
 
 	return pages ? 0 : ret;
 }
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 35afa13124b8..a95f84d72a55 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -806,6 +806,7 @@ struct f2fs_inode_info {
 	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
 	struct task_struct *inmem_task;	/* store inmemory task */
 	struct mutex inmem_lock;	/* lock for inmemory pages */
+	pgoff_t ra_offset;		/* ongoing readahead offset */
 	struct extent_tree *extent_tree;	/* cached extent_tree entry */
 
 	/* avoid racing between foreground op and gc */
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 0e860186a9c5..6fd2ad43d9e4 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1011,6 +1011,8 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
 	/* Will be used by directory only */
 	fi->i_dir_level = F2FS_SB(sb)->dir_level;
 
+	fi->ra_offset = -1;
+
 	return &fi->vfs_inode;
 }
--
2.27.0.111.gc72c7da667-goog