Message-ID: <807c19f9-f4d6-477d-8728-ab90734a4ebe@kernel.org>
Date: Fri, 6 Sep 2024 14:34:55 +0800
From: Chao Yu <chao@...nel.org>
To: Daeho Jeong <daeho43@...il.com>, linux-kernel@...r.kernel.org,
linux-f2fs-devel@...ts.sourceforge.net, kernel-team@...roid.com
Cc: Daeho Jeong <daehojeong@...gle.com>
Subject: Re: [f2fs-dev] [PATCH 5/7] f2fs: do FG_GC when GC boosting is
required for zoned devices
On 2024/8/30 5:52, Daeho Jeong wrote:
> From: Daeho Jeong <daehojeong@...gle.com>
>
> Under low free section count, we need to use FG_GC instead of BG_GC to
> recover free sections.
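To make the intent concrete, here is a minimal illustrative sketch of the
policy (the helper name and threshold parameter are hypothetical, not part
of this patch; the enum values stand in for the f2fs gc-type constants):

	enum { BG_GC, FG_GC };	/* illustrative stand-ins for the f2fs gc types */

	/* Hypothetical helper, not in the patch: once the free-section
	 * count drops to the boost threshold, reclaim must run as
	 * foreground GC so it synchronously frees sections; otherwise the
	 * opportunistic background mode is enough. */
	static inline int pick_gc_type(unsigned int free_secs,
				       unsigned int boost_threshold)
	{
		return free_secs <= boost_threshold ? FG_GC : BG_GC;
	}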
>
> Signed-off-by: Daeho Jeong <daehojeong@...gle.com>
> ---
>  fs/f2fs/f2fs.h |  1 +
>  fs/f2fs/gc.c   | 24 +++++++++++++++++-------
>  2 files changed, 18 insertions(+), 7 deletions(-)
>
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 5e4db3ba534a..ee1fafc65e95 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -1294,6 +1294,7 @@ struct f2fs_gc_control {
>  	bool should_migrate_blocks;	/* should migrate blocks */
>  	bool err_gc_skipped;		/* return EAGAIN if GC skipped */

	bool one_time;		/* require one time GC in one migration unit */

How about declaring it here, in order to avoid unnecessary padding for
alignment?
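For illustration, a minimal userspace sketch of the two layouts (the
struct names are made up, only the fields visible in this hunk are shown,
and the offsets assume a typical LP64 ABI with 4-byte int and 1-byte
bool):

	#include <stdbool.h>
	#include <stdio.h>

	/* As posted: 2 bytes of padding before nr_free_secs plus 3 tail
	 * bytes after one_time -> sizeof == 12. */
	struct gc_control_posted {
		bool should_migrate_blocks;
		bool err_gc_skipped;
		unsigned int nr_free_secs;
		bool one_time;
	};

	/* With one_time grouped next to the other bools: only 1 byte of
	 * padding before nr_free_secs -> sizeof == 8. */
	struct gc_control_packed {
		bool should_migrate_blocks;
		bool err_gc_skipped;
		bool one_time;
		unsigned int nr_free_secs;
	};

	int main(void)
	{
		printf("posted: %zu, packed: %zu\n",
		       sizeof(struct gc_control_posted),
		       sizeof(struct gc_control_packed));
		return 0;
	}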
Thanks,
>  	unsigned int nr_free_secs;	/* # of free sections to do GC */
> +	bool one_time;			/* require one time GC in one migration unit */
>  };
>  
>  /*
> diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
> index d6d71aab94f3..37b47a8d95f1 100644
> --- a/fs/f2fs/gc.c
> +++ b/fs/f2fs/gc.c
> @@ -81,6 +81,8 @@ static int gc_thread_func(void *data)
>  			continue;
>  		}
>  
> +		gc_control.one_time = false;
> +
>  		/*
>  		 * [GC triggering condition]
>  		 * 0. GC is not conducted currently.
> @@ -126,15 +128,19 @@ static int gc_thread_func(void *data)
>  			wait_ms = gc_th->max_sleep_time;
>  		}
>  
> -		if (need_to_boost_gc(sbi))
> +		if (need_to_boost_gc(sbi)) {
>  			decrease_sleep_time(gc_th, &wait_ms);
> -		else
> +			if (f2fs_sb_has_blkzoned(sbi))
> +				gc_control.one_time = true;
> +		} else {
>  			increase_sleep_time(gc_th, &wait_ms);
> +		}
>  do_gc:
>  		stat_inc_gc_call_count(sbi, foreground ?
>  				FOREGROUND : BACKGROUND);
>  
> -		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
> +		sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
> +				gc_control.one_time;
>  
>  		/* foreground GC was triggered via f2fs_balance_fs() */
>  		if (foreground)
> @@ -1701,7 +1707,7 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
>  static int do_garbage_collect(struct f2fs_sb_info *sbi,
>  				unsigned int start_segno,
>  				struct gc_inode_list *gc_list, int gc_type,
> -				bool force_migrate)
> +				bool force_migrate, bool one_time)
>  {
>  	struct page *sum_page;
>  	struct f2fs_summary_block *sum;
> @@ -1728,7 +1734,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
>  		sec_end_segno -= SEGS_PER_SEC(sbi) -
>  				f2fs_usable_segs_in_sec(sbi, segno);
>  
> -		if (gc_type == BG_GC) {
> +		if (gc_type == BG_GC || one_time) {
>  			unsigned int migration_granularity =
>  				sbi->migration_granularity;
>
> @@ -1908,7 +1914,8 @@ int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
>  	}
>  
>  	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
> -				gc_control->should_migrate_blocks);
> +				gc_control->should_migrate_blocks,
> +				gc_control->one_time);
>  	if (seg_freed < 0)
>  		goto stop;
>  
> @@ -1919,6 +1926,9 @@ int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
>  		total_sec_freed++;
>  	}
>  
> +	if (gc_control->one_time)
> +		goto stop;
> +
>  	if (gc_type == FG_GC) {
>  		sbi->cur_victim_sec = NULL_SEGNO;
>
> @@ -2044,7 +2054,7 @@ int f2fs_gc_range(struct f2fs_sb_info *sbi,
>  		};
>  
>  		do_garbage_collect(sbi, segno, &gc_list, FG_GC,
> -				dry_run_sections == 0);
> +				dry_run_sections == 0, false);
>  		put_gc_inode(&gc_list);
>  
>  		if (!dry_run && get_valid_blocks(sbi, segno, true))