Date:	Tue, 6 Jan 2015 11:32:22 -0800
From:	Jaegeuk Kim <jaegeuk@...nel.org>
To:	Chao Yu <chao2.yu@...sung.com>
Cc:	Changman Lee <cm224.lee@...sung.com>,
	linux-f2fs-devel@...ts.sourceforge.net,
	linux-kernel@...r.kernel.org
Subject: Re: [f2fs-dev][PATCH 1/2] f2fs: get rid of kzalloc in
 __recover_inline_status

Hi Chao,

On Tue, Jan 06, 2015 at 02:28:43PM +0800, Chao Yu wrote:
> We use kzalloc to allocate memory in __recover_inline_status, and use this
> all-zero memory to check the inline data content of the inode page by
> comparing them. This is inefficient and unnecessary; let's check the inline
> data content directly.
> 
> Signed-off-by: Chao Yu <chao2.yu@...sung.com>
> ---
>  fs/f2fs/inode.c | 27 +++++++++++----------------
>  1 file changed, 11 insertions(+), 16 deletions(-)
> 
> diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
> index 3a8958d..8be0fd5 100644
> --- a/fs/f2fs/inode.c
> +++ b/fs/f2fs/inode.c
> @@ -67,29 +67,25 @@ static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
>  	}
>  }
>  
> -static int __recover_inline_status(struct inode *inode, struct page *ipage)
> +static void __recover_inline_status(struct inode *inode, struct page *ipage)
>  {
>  	void *inline_data = inline_data_addr(ipage);
> +	__le32 *start = inline_data;
> +	__le32 *end = start + MAX_INLINE_DATA / sizeof(__le32);
>  	struct f2fs_inode *ri;
> -	void *zbuf;
> -
> -	zbuf = kzalloc(MAX_INLINE_DATA, GFP_NOFS);
> -	if (!zbuf)
> -		return -ENOMEM;
> -
> -	if (!memcmp(zbuf, inline_data, MAX_INLINE_DATA)) {
> -		kfree(zbuf);
> -		return 0;
> -	}
> -	kfree(zbuf);
>  
> +	while (start < end)
> +		if (*start++)
> +			goto recover;
> +	return;
> +recover:
>  	f2fs_wait_on_page_writeback(ipage, NODE);
>  	set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
>  
>  	ri = F2FS_INODE(ipage);
>  	set_raw_inline(F2FS_I(inode), ri);
>  	set_page_dirty(ipage);
> -	return 0;
> +	return;
>  }

I don't think the *goto recover* jump is a good way to structure this.

How about something like this?
If you don't mind, I'll merge the patch with this version.

static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE);

			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			set_raw_inline(F2FS_I(inode), F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
}
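
For context, a minimal standalone sketch of the same zero-scan idea is below.
The buffer size, type names and function names are illustrative only (not the
f2fs ones); it just shows why no temporary zero buffer is needed once we scan
the inline area word by word:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define INLINE_DATA_SIZE 64	/* illustrative size, stands in for MAX_INLINE_DATA */

/* Old approach: compare against a separate all-zero buffer. */
static bool has_data_memcmp(const void *buf)
{
	static const unsigned char zeroes[INLINE_DATA_SIZE];

	return memcmp(zeroes, buf, INLINE_DATA_SIZE) != 0;
}

/* New approach: scan 32-bit words and stop at the first nonzero one,
 * mirroring the loop over the inline data area in the patch. */
static bool has_data_scan(const void *buf)
{
	const uint32_t *start = buf;
	const uint32_t *end = start + INLINE_DATA_SIZE / sizeof(uint32_t);

	while (start < end)
		if (*start++)
			return true;
	return false;
}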

Thanks,

>  
>  static int do_read_inode(struct inode *inode)
> @@ -98,7 +94,6 @@ static int do_read_inode(struct inode *inode)
>  	struct f2fs_inode_info *fi = F2FS_I(inode);
>  	struct page *node_page;
>  	struct f2fs_inode *ri;
> -	int err = 0;
>  
>  	/* Check if ino is within scope */
>  	if (check_nid_range(sbi, inode->i_ino)) {
> @@ -142,7 +137,7 @@ static int do_read_inode(struct inode *inode)
>  
>  	/* check data exist */
>  	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
> -		err = __recover_inline_status(inode, node_page);
> +		__recover_inline_status(inode, node_page);
>  
>  	/* get rdev by using inline_info */
>  	__get_inode_rdev(inode, ri);
> @@ -152,7 +147,7 @@ static int do_read_inode(struct inode *inode)
>  	stat_inc_inline_inode(inode);
>  	stat_inc_inline_dir(inode);
>  
> -	return err;
> +	return 0;
>  }
>  
>  struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
> -- 
> 2.2.1
