Message-ID: <d7b3ad0c-6474-459d-855d-c1c435a2bb17@kernel.org>
Date: Wed, 11 Sep 2024 15:12:25 +0800
From: Chao Yu <chao@...nel.org>
To: wangzijie1@...or.com
Cc: chao@...nel.org, jaegeuk@...nel.org, linux-kernel@...r.kernel.org,
linux-f2fs-devel@...ts.sourceforge.net
Subject: Re: [f2fs-dev] [RFC PATCH] f2fs: don't set SBI_QUOTA_NEED_REPAIR flag
if receive SIGKILL
On 2024/9/4 11:49, Chao Yu via Linux-f2fs-devel wrote:
> On 2024/9/3 14:06, wangzijie wrote:
>> From: Chao Yu via Linux-f2fs-devel <linux-f2fs-devel@...ts.sourceforge.net>
>>
>>> On 2024/8/27 14:22, wangzijie wrote:
>>>> Thread A
>>>> -dquot_initialize
>>>> -dqget
>>>> -f2fs_dquot_acquire
>>>> -v2_read_dquot
>>>> -qtree_read_dquot
>>>> -find_tree_dqentry
>>>> -f2fs_quota_read
>>>> -read_cache_page_gfp
>>>> -do_read_cache_folio
>>>> -filemap_read_folio
>>>> -folio_wait_locked_killable
>>>> -receive SIGKILL : return -EINTR
>>>> -set SBI_QUOTA_NEED_REPAIR
>>>> -set SBI_QUOTA_NEED_REPAIR
>>>>
>>>> When read_cache_page_gfp() is called during a quota read, the thread may receive
>>>> SIGKILL and SBI_QUOTA_NEED_REPAIR gets set. Should we really set SBI_QUOTA_NEED_REPAIR
>>>> in this error path?
>>>
>>> f2fs_quota_read() can be called in a lot of contexts; can we just ignore -EINTR
>>> for the f2fs_dquot_initialize() case?
>>>
>>> Thanks,
>>
>> Yes, f2fs_quota_read() can be called in many contexts and may return -EINTR, so we would need to ignore this errno in more cases. If that is the way to go, I will check it and resend the patch.
>> Or do you have other suggestions to avoid setting the SBI_QUOTA_NEED_REPAIR flag unnecessarily?
>
> How about this?
>
> ---
>  fs/f2fs/f2fs.h  |  1 +
>  fs/f2fs/inode.c |  3 +--
>  fs/f2fs/super.c | 17 +++++++++++++----
>  3 files changed, 15 insertions(+), 6 deletions(-)
>
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index dfed1974eda5..a1704a19dfe9 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -810,6 +810,7 @@ enum {
>  	FI_ATOMIC_DIRTIED,	/* indicate atomic file is dirtied */
>  	FI_ATOMIC_REPLACE,	/* indicate atomic replace */
>  	FI_OPENED_FILE,		/* indicate file has been opened */
> +	FI_INIT_DQUOT,		/* indicate it's initializing dquot */
>  	FI_MAX,			/* max flag, never be used */
>  };
>
> diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
> index 008f01348afa..b1dbaeda306f 100644
> --- a/fs/f2fs/inode.c
> +++ b/fs/f2fs/inode.c
> @@ -827,8 +827,7 @@ void f2fs_evict_inode(struct inode *inode)
>
>  	err = f2fs_dquot_initialize(inode);
>  	if (err) {
> -		if (err != -EINTR)
> -			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
> +		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
>  		err = 0;
>  	}
>
> diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
> index 8e29aba4b7a4..e774bdf875b2 100644
> --- a/fs/f2fs/super.c
> +++ b/fs/f2fs/super.c
> @@ -2644,8 +2644,11 @@ static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
>  		if (PTR_ERR(page) == -ENOMEM) {
>  			memalloc_retry_wait(GFP_NOFS);
>  			goto repeat;
> -		} else if (PTR_ERR(page) != -EINTR)
> -			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
> +		} else if (PTR_ERR(page) == -EINTR &&
> +			   is_inode_flag_set(inode, FI_INIT_DQUOT)) {
The inode here is the quota file inode, not the same inode we tagged
w/ FI_INIT_DQUOT, so please ignore this diff...
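
(For reference, a rough sketch of the mismatch, based on my reading of the
code -- simplified, not the exact source:

	/* f2fs_quota_read(): 'inode' is the quota file for this quota type */
	struct inode *inode = sb_dqopt(sb)->files[type];

	/* f2fs_dquot_initialize(): the flag goes on the regular inode
	 * being initialized, which is a different inode */
	set_inode_flag(inode, FI_INIT_DQUOT);

so the new is_inode_flag_set() check above looks at an inode that was never
tagged.)
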
Thanks,
> +			return PTR_ERR(page);
> +		}
> +		set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
>  		return PTR_ERR(page);
>  	}
>
> @@ -2721,10 +2724,16 @@ static ssize_t f2fs_quota_write(struct super_block *sb, int type,
>
>  int f2fs_dquot_initialize(struct inode *inode)
>  {
> +	int ret;
> +
>  	if (time_to_inject(F2FS_I_SB(inode), FAULT_DQUOT_INIT))
>  		return -ESRCH;
>  
> -	return dquot_initialize(inode);
> +	set_inode_flag(inode, FI_INIT_DQUOT);
> +	ret = dquot_initialize(inode);
> +	clear_inode_flag(inode, FI_INIT_DQUOT);
> +
> +	return ret;
>  }
>
> static struct dquot __rcu **f2fs_get_dquots(struct inode *inode)
> @@ -3064,7 +3073,7 @@ static int f2fs_dquot_acquire(struct dquot *dquot)
>
>  	f2fs_down_read(&sbi->quota_sem);
>  	ret = dquot_acquire(dquot);
> -	if (ret < 0 && ret != -EINTR)
> +	if (ret < 0)
>  		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
>  	f2fs_up_read(&sbi->quota_sem);
>  	return ret;
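
To spell out why the original inode is out of reach by the time the quota
file is read, roughly (simplified from my understanding of the generic quota
path; details may differ from the exact code):

	f2fs_dquot_initialize(inode)	/* FI_INIT_DQUOT set on this inode */
	  dquot_initialize(inode)
	    dqget(sb, qid)		/* only sb + quota id from here on */
	      ->acquire_dquot()		/* f2fs_dquot_acquire(dquot) */
	        dquot_acquire(dquot)
	          v2_read_dquot()
	            qtree_read_dquot()
	              f2fs_quota_read(sb, type, ...)	/* quota file inode */

so a per-inode flag set in f2fs_dquot_initialize() cannot be seen from
f2fs_quota_read().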