Message-ID: <dd51e53e-aa4a-7879-1e77-839c22bc66cf@huawei.com>
Date: Wed, 8 Mar 2017 19:16:49 +0800
From: Chao Yu <yuchao0@...wei.com>
To: Jaegeuk Kim <jaegeuk@...nel.org>
CC: <linux-f2fs-devel@...ts.sourceforge.net>,
<linux-kernel@...r.kernel.org>, <chao@...nel.org>
Subject: Re: [PATCH v2] f2fs: combine nat_bits and free_nid_bitmap cache

Hi Jaegeuk,

On 2017/3/8 6:27, Jaegeuk Kim wrote:
> Hi Chao,
>
> There were two reasons for the slowdown; one was set_bit_le and the other was
> the spin_lock calls.
>
> I think it would be good to merge this patch into yours as well.
> Let me know. I'm ready to integrate together and test them.
The change looks good to me. ;)
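
For anyone following the thread: the spin_lock half of the slowdown comes from
taking free_nid_lock once per nid inside the load_free_nid_bitmap() loop, and
the patch hoists that to once per NAT block via the new "locked" parameter.
A minimal user-space sketch of the pattern (a pthread mutex standing in for
the spinlock; all names below are hypothetical, not f2fs code):

#include <pthread.h>

#define ENTRIES_PER_BLOCK 455	/* illustrative; NAT_ENTRY_PER_BLOCK in f2fs */
#define NR_BLOCKS 64		/* hypothetical number of NAT blocks */

static pthread_mutex_t count_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int free_count[NR_BLOCKS];

/* Before: one lock/unlock round trip for every entry in the scan loop. */
static void update_per_entry(unsigned int block)
{
	unsigned int i;

	for (i = 0; i < ENTRIES_PER_BLOCK; i++) {
		pthread_mutex_lock(&count_lock);
		free_count[block]++;
		pthread_mutex_unlock(&count_lock);
	}
}

/* After: the caller takes the lock once per block, which is what the
 * "locked" parameter lets update_free_nid_bitmap() rely on. */
static void update_batched(unsigned int block)
{
	unsigned int i;

	pthread_mutex_lock(&count_lock);
	for (i = 0; i < ENTRIES_PER_BLOCK; i++)
		free_count[block]++;
	pthread_mutex_unlock(&count_lock);
}

int main(void)
{
	update_per_entry(0);
	update_batched(1);
	return 0;
}

The set_bit_le() half is the same idea: with the lock already held by the
caller, the non-atomic __set_bit_le()/__clear_bit_le() variants avoid an
atomic read-modify-write per bit.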
Anyway, I will send the last updated patch which you have merged, in order to
track others' comments if there are any.
Thanks,
>
> Signed-off-by: Jaegeuk Kim <jaegeuk@...nel.org>
> ---
> fs/f2fs/node.c | 22 +++++++++++++---------
> 1 file changed, 13 insertions(+), 9 deletions(-)
>
> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> index 3377a512e299..0a1ea59c9d31 100644
> --- a/fs/f2fs/node.c
> +++ b/fs/f2fs/node.c
> @@ -1815,7 +1815,7 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
> }
>
> void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
> - bool set, bool build)
> + bool set, bool build, bool locked)
> {
> struct f2fs_nm_info *nm_i = NM_I(sbi);
> unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
> @@ -1829,12 +1829,14 @@ void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
> else
> __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
>
> - spin_lock(&nm_i->free_nid_lock);
> + if (!locked)
> + spin_lock(&nm_i->free_nid_lock);
> if (set)
> nm_i->free_nid_count[nat_ofs]++;
> else if (!build)
> nm_i->free_nid_count[nat_ofs]--;
> - spin_unlock(&nm_i->free_nid_lock);
> + if (!locked)
> + spin_unlock(&nm_i->free_nid_lock);
> }
>
> static void scan_nat_page(struct f2fs_sb_info *sbi,
> @@ -1863,7 +1865,7 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
> f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
> if (blk_addr == NULL_ADDR)
> freed = add_free_nid(sbi, start_nid, true);
> - update_free_nid_bitmap(sbi, start_nid, freed, true);
> + update_free_nid_bitmap(sbi, start_nid, freed, true, false);
> }
> }
>
> @@ -2018,7 +2020,7 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
> __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
> nm_i->available_nids--;
>
> - update_free_nid_bitmap(sbi, *nid, false, false);
> + update_free_nid_bitmap(sbi, *nid, false, false, false);
>
> spin_unlock(&nm_i->nid_list_lock);
> return true;
> @@ -2074,7 +2076,7 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
>
> nm_i->available_nids++;
>
> - update_free_nid_bitmap(sbi, nid, true, false);
> + update_free_nid_bitmap(sbi, nid, true, false, false);
>
> spin_unlock(&nm_i->nid_list_lock);
>
> @@ -2404,11 +2406,11 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
> add_free_nid(sbi, nid, false);
> spin_lock(&NM_I(sbi)->nid_list_lock);
> NM_I(sbi)->available_nids++;
> - update_free_nid_bitmap(sbi, nid, true, false);
> + update_free_nid_bitmap(sbi, nid, true, false, false);
> spin_unlock(&NM_I(sbi)->nid_list_lock);
> } else {
> spin_lock(&NM_I(sbi)->nid_list_lock);
> - update_free_nid_bitmap(sbi, nid, false, false);
> + update_free_nid_bitmap(sbi, nid, false, false, false);
> spin_unlock(&NM_I(sbi)->nid_list_lock);
> }
> }
> @@ -2533,8 +2535,10 @@ inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
> nid = i * NAT_ENTRY_PER_BLOCK;
> last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
>
> + spin_lock(&nm_i->free_nid_lock);
> for (; nid < last_nid; nid++)
> - update_free_nid_bitmap(sbi, nid, true, true);
> + update_free_nid_bitmap(sbi, nid, true, true, true);
> + spin_unlock(&nm_i->free_nid_lock);
> }
>
> for (i = 0; i < nm_i->nat_blocks; i++) {
>