Message-ID: <72715a7c-b07b-a8d9-dfb9-23d758931c9c@huawei.com>
Date: Wed, 22 Mar 2017 14:30:10 +0800
From: Chao Yu <yuchao0@...wei.com>
To: Jaegeuk Kim <jaegeuk@...nel.org>
CC: <linux-f2fs-devel@...ts.sourceforge.net>,
<linux-kernel@...r.kernel.org>, <chao@...nel.org>
Subject: Re: [PATCH v2] f2fs: fix race condition in between free nid
allocator/initializer
Hi Jaegeuk,
On 2017/3/22 0:13, Jaegeuk Kim wrote:
> Hi Chao,
>
> On 03/21, Chao Yu wrote:
>> In the concurrent case below, an allocated nid can be loaded into the free nid
>> cache and be allocated again.
>>
>> Thread A                              Thread B
>> - f2fs_create
>>  - f2fs_new_inode
>>   - alloc_nid
>>    - __insert_nid_to_list(ALLOC_NID_LIST)
>>                                        - f2fs_balance_fs_bg
>>                                         - build_free_nids
>>                                          - __build_free_nids
>>                                           - scan_nat_page
>>                                            - add_free_nid
>>                                             - __lookup_nat_cache
>>  - f2fs_add_link
>>   - init_inode_metadata
>>    - new_inode_page
>>     - new_node_page
>>      - set_node_addr
>>  - alloc_nid_done
>>   - __remove_nid_from_list(ALLOC_NID_LIST)
>>                                             - __insert_nid_to_list(FREE_NID_LIST)
>>
>> This patch takes build_lock to cover free nid allocation and initialization,
>> avoiding this race condition.
>
> I have a concern about mutex contention.
Alright, so far I haven't caught any other race problem, so I think it would
be OK as well.
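
To make the contention concern concrete: build_free_nids() already holds
nm_i->build_lock across the whole NAT scan, so if alloc_nid() also has to take
the same mutex for every allocation (which is roughly what covering the
allocator with build_lock means), each new inode or node allocation can stall
behind a full scan. A simplified sketch of that shape, with argument lists
trimmed and bodies reduced to comments, so it is illustrative only rather than
the actual v2 diff:

/* scanner side: build_lock held for the whole scan (as in mainline) */
void build_free_nids(struct f2fs_sb_info *sbi)
{
	mutex_lock(&NM_I(sbi)->build_lock);
	/* scan NAT pages / current summaries and refill FREE_NID_LIST */
	mutex_unlock(&NM_I(sbi)->build_lock);
}

/* allocator side under the v2 scheme: same mutex taken per allocation */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	mutex_lock(&nm_i->build_lock);		/* serializes against the scan */
	spin_lock(&nm_i->nid_list_lock);
	/* pick a nid from FREE_NID_LIST and move it to ALLOC_NID_LIST */
	spin_unlock(&nm_i->nid_list_lock);
	mutex_unlock(&nm_i->build_lock);
	return true;	/* sketch: assume a free nid was found */
}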
> How about this?
I changed it a bit to avoid an unneeded radix-tree operation (with the check as
written, a nid that is already cached as NID_NEW would still fall through to
__insert_nid_to_list() and fail the radix-tree insert), see below.
>
> ---
> fs/f2fs/node.c | 60 ++++++++++++++++++++++++++++++++++++++++------------------
> 1 file changed, 42 insertions(+), 18 deletions(-)
>
> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> index 347aa30ef0cf..25bc47087f91 100644
> --- a/fs/f2fs/node.c
> +++ b/fs/f2fs/node.c
> @@ -1759,40 +1759,64 @@ static void __remove_nid_from_list(struct f2fs_sb_info *sbi,
>  static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
>  {
>  	struct f2fs_nm_info *nm_i = NM_I(sbi);
> -	struct free_nid *i;
> +	struct free_nid *i, *e;
>  	struct nat_entry *ne;
> -	int err;
> +	int err = -EINVAL;
> +	bool ret = false;
>
>  	/* 0 nid should not be used */
>  	if (unlikely(nid == 0))
>  		return false;
>
> -	if (build) {
> -		/* do not add allocated nids */
> -		ne = __lookup_nat_cache(nm_i, nid);
> -		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
> -				nat_get_blkaddr(ne) != NULL_ADDR))
> -			return false;
> -	}
> -
>  	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
>  	i->nid = nid;
>  	i->state = NID_NEW;
>
> -	if (radix_tree_preload(GFP_NOFS)) {
> -		kmem_cache_free(free_nid_slab, i);
> -		return true;
> -	}
> +	if (radix_tree_preload(GFP_NOFS))
> +		goto err;
>
>  	spin_lock(&nm_i->nid_list_lock);
> +
> +	if (build) {
> +		/*
> +		 * Thread A                Thread B
> +		 *  - f2fs_create
> +		 *   - f2fs_new_inode
> +		 *    - alloc_nid
> +		 *     - __insert_nid_to_list(ALLOC_NID_LIST)
> +		 *                           - f2fs_balance_fs_bg
> +		 *                            - build_free_nids
> +		 *                             - __build_free_nids
> +		 *                              - scan_nat_page
> +		 *                               - add_free_nid
> +		 *                                - __lookup_nat_cache
> +		 *  - f2fs_add_link
> +		 *   - init_inode_metadata
> +		 *    - new_inode_page
> +		 *     - new_node_page
> +		 *      - set_node_addr
> +		 *  - alloc_nid_done
> +		 *   - __remove_nid_from_list(ALLOC_NID_LIST)
> +		 *                                - __insert_nid_to_list(FREE_NID_LIST)
> +		 */
> +		ne = __lookup_nat_cache(nm_i, nid);
> +		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
> +				nat_get_blkaddr(ne) != NULL_ADDR))
> +			goto err_out;
> +
> +		e = __lookup_free_nid_list(nm_i, nid);
> +		if (e && e->state == NID_ALLOC)
		e = __lookup_free_nid_list(nm_i, nid);
		if (e) {
			if (e->state == NID_NEW)
				ret = true;
			goto err_out;
		}
Thanks,
> +			goto err_out;
> +	}
> +	ret = true;
>  	err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true);
> +err_out:
>  	spin_unlock(&nm_i->nid_list_lock);
>  	radix_tree_preload_end();
> -	if (err) {
> +err:
> +	if (err)
>  		kmem_cache_free(free_nid_slab, i);
> -		return true;
> -	}
> -	return true;
> +	return ret;
>  }
>
>  static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
>
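
For clarity, folding my tweak into your diff, add_free_nid() would end up
looking roughly like this (assembled from the hunks quoted above, with the long
race comment shortened and a few comments added, so treat it as a sketch of the
result rather than the final patch):

static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *e;
	struct nat_entry *ne;
	int err = -EINVAL;
	bool ret = false;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return false;

	/* preallocate the free_nid entry before taking nid_list_lock */
	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	if (radix_tree_preload(GFP_NOFS))
		goto err;

	spin_lock(&nm_i->nid_list_lock);

	if (build) {
		/*
		 * see the race comment above: both checks and the insertion
		 * now sit in one nid_list_lock critical section
		 */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			goto err_out;

		/*
		 * nid is already tracked: count it as free only when it is
		 * NID_NEW, and skip the duplicate radix-tree insert
		 */
		e = __lookup_free_nid_list(nm_i, nid);
		if (e) {
			if (e->state == NID_NEW)
				ret = true;
			goto err_out;
		}
	}
	ret = true;
	err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true);
err_out:
	spin_unlock(&nm_i->nid_list_lock);
	radix_tree_preload_end();
err:
	if (err)
		kmem_cache_free(free_nid_slab, i);
	return ret;
}

The point is that the NAT-cache check and the free-nid-list check now happen
under nid_list_lock, in the same critical section as the insertion, which
closes the window shown in the diagram without any extra mutex on the
allocation path.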