Message-Id: <20200702041414.3017119-1-daeho43@gmail.com>
Date: Thu, 2 Jul 2020 13:14:14 +0900
From: Daeho Jeong <daeho43@...il.com>
To: linux-kernel@...r.kernel.org,
linux-f2fs-devel@...ts.sourceforge.net, kernel-team@...roid.com
Cc: Daeho Jeong <daehojeong@...gle.com>
Subject: [PATCH v2] f2fs: add GC_URGENT_LOW mode in gc_urgent
From: Daeho Jeong <daehojeong@...gle.com>
Added a new gc_urgent mode, GC_URGENT_LOW, in which F2FS
lowers the bar for the idle check so that outstanding
discard commands and GC are processed a little more
aggressively.
Signed-off-by: Daeho Jeong <daehojeong@...gle.com>
---
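Note (not part of the commit message): as a rough illustration of what
GC_URGENT_LOW changes, the sketch below is a simplified, user-space
rendering of the relaxed idle check. The struct fields, is_idle_sketch()
and the has_pending_io/time_over flags are stand-ins for the real f2fs
internals, not actual kernel identifiers.

#include <stdbool.h>

/* Mirrors the gc_mode values added by this patch. */
enum gc_mode { GC_NORMAL, GC_IDLE_CB, GC_IDLE_GREEDY,
	       GC_URGENT_HIGH, GC_URGENT_LOW };

/* The idle-check "types" relevant here. */
enum idle_type { DISCARD_TIME, GC_TIME, REQ_TIME };

struct sbi_sketch {
	enum gc_mode gc_mode;	/* current GC state */
	bool has_pending_io;	/* queued reads/writes/discards/flushes */
	bool time_over;		/* what the normal time-based check would return */
};

static bool is_idle_sketch(const struct sbi_sketch *sbi, enum idle_type type)
{
	if (sbi->gc_mode == GC_URGENT_HIGH)
		return true;		/* always treated as idle */

	if (sbi->has_pending_io)
		return false;		/* outstanding I/O still wins */

	/* New with GC_URGENT_LOW: skip the time check for discard/GC work. */
	if (sbi->gc_mode == GC_URGENT_LOW &&
	    (type == DISCARD_TIME || type == GC_TIME))
		return true;

	return sbi->time_over;		/* otherwise, normal time-based idle */
}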
Documentation/ABI/testing/sysfs-fs-f2fs | 4 +++-
fs/f2fs/f2fs.h | 10 ++++++++--
fs/f2fs/gc.c | 6 +++---
fs/f2fs/segment.c | 4 ++--
fs/f2fs/sysfs.c | 10 +++++++---
5 files changed, 23 insertions(+), 11 deletions(-)
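For completeness, a hypothetical user-space sketch of the value-to-mode
mapping that the gc_urgent sysfs knob accepts after this change;
parse_gc_urgent() is illustrative only and does not exist in the kernel.

#include <errno.h>

enum gc_mode { GC_NORMAL, GC_IDLE_CB, GC_IDLE_GREEDY,
	       GC_URGENT_HIGH, GC_URGENT_LOW };

/* Maps a value written to /sys/fs/f2fs/<disk>/gc_urgent to a gc_mode. */
static int parse_gc_urgent(long t, enum gc_mode *mode)
{
	switch (t) {
	case 0:
		*mode = GC_NORMAL;	/* default: no urgent GC */
		return 0;
	case 1:
		*mode = GC_URGENT_HIGH;	/* wake GC/discard threads immediately */
		return 0;
	case 2:
		*mode = GC_URGENT_LOW;	/* only relax the idle checks */
		return 0;
	default:
		return -EINVAL;		/* any other value is now rejected */
	}
}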
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 4bb93a06d8ab..7f730c4c8df2 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -229,7 +229,9 @@ Date: August 2017
Contact: "Jaegeuk Kim" <jaegeuk@...nel.org>
Description: Do background GC agressively when set. When gc_urgent = 1,
background thread starts to do GC by given gc_urgent_sleep_time
- interval. It is set to 0 by default.
+ interval. When gc_urgent = 2, F2FS lowers the bar for the
+ idle check so that outstanding discard commands and GC are
+ processed a little more aggressively. It is set to 0 by default.
What: /sys/fs/f2fs/<disk>/gc_urgent_sleep_time
Date: August 2017
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index e6e47618a357..4b28fd42fdbc 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1283,7 +1283,8 @@ enum {
GC_NORMAL,
GC_IDLE_CB,
GC_IDLE_GREEDY,
- GC_URGENT,
+ GC_URGENT_HIGH,
+ GC_URGENT_LOW,
};
enum {
@@ -1540,6 +1541,7 @@ struct f2fs_sb_info {
unsigned int cur_victim_sec; /* current victim section num */
unsigned int gc_mode; /* current GC state */
unsigned int next_victim_seg[2]; /* next segment in victim section */
+
/* for skip statistic */
unsigned int atomic_files; /* # of opened atomic file */
unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */
@@ -2480,7 +2482,7 @@ static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
{
- if (sbi->gc_mode == GC_URGENT)
+ if (sbi->gc_mode == GC_URGENT_HIGH)
return true;
if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
@@ -2498,6 +2500,10 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
return false;
+ if (sbi->gc_mode == GC_URGENT_LOW &&
+ (type == DISCARD_TIME || type == GC_TIME))
+ return true;
+
return f2fs_time_over(sbi, type);
}
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 6eec3b2d606d..3b718da69910 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -82,7 +82,7 @@ static int gc_thread_func(void *data)
* invalidated soon after by user update or deletion.
* So, I'd like to wait some time to collect dirty segments.
*/
- if (sbi->gc_mode == GC_URGENT) {
+ if (sbi->gc_mode == GC_URGENT_HIGH) {
wait_ms = gc_th->urgent_sleep_time;
down_write(&sbi->gc_lock);
goto do_gc;
@@ -176,7 +176,7 @@ static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
gc_mode = GC_CB;
break;
case GC_IDLE_GREEDY:
- case GC_URGENT:
+ case GC_URGENT_HIGH:
gc_mode = GC_GREEDY;
break;
}
@@ -211,7 +211,7 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
* foreground GC and urgent GC cases.
*/
if (gc_type != FG_GC &&
- (sbi->gc_mode != GC_URGENT) &&
+ (sbi->gc_mode != GC_URGENT_HIGH) &&
p->max_search > sbi->max_victim_search)
p->max_search = sbi->max_victim_search;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index b45e473508a9..5924b3965ae4 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -174,7 +174,7 @@ bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
if (f2fs_lfs_mode(sbi))
return false;
- if (sbi->gc_mode == GC_URGENT)
+ if (sbi->gc_mode == GC_URGENT_HIGH)
return true;
if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
return true;
@@ -1759,7 +1759,7 @@ static int issue_discard_thread(void *data)
continue;
}
- if (sbi->gc_mode == GC_URGENT)
+ if (sbi->gc_mode == GC_URGENT_HIGH)
__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);
sb_start_intwrite(sbi->sb);
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index e877c59b9fdb..2a140657fc4d 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -350,16 +350,20 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
return -EINVAL;
if (!strcmp(a->attr.name, "gc_urgent")) {
- if (t >= 1) {
- sbi->gc_mode = GC_URGENT;
+ if (t == 0) {
+ sbi->gc_mode = GC_NORMAL;
+ } else if (t == 1) {
+ sbi->gc_mode = GC_URGENT_HIGH;
if (sbi->gc_thread) {
sbi->gc_thread->gc_wake = 1;
wake_up_interruptible_all(
&sbi->gc_thread->gc_wait_queue_head);
wake_up_discard_thread(sbi, true);
}
+ } else if (t == 2) {
+ sbi->gc_mode = GC_URGENT_LOW;
} else {
- sbi->gc_mode = GC_NORMAL;
+ return -EINVAL;
}
return count;
}
--
2.27.0.383.g050319c2ae-goog