Message-ID: <alpine.LFD.2.00.1404171010010.2143@localhost.localdomain>
Date:	Thu, 17 Apr 2014 10:13:26 +0200 (CEST)
From:	Lukáš Czerner <lczerner@...hat.com>
To:	mingming cao <mingming@...cle.com>
cc:	linux-ext4@...r.kernel.org
Subject: Re: [PATCH 2/2] ext4: Rename uninitialized extents to unwritten

On Wed, 16 Apr 2014, mingming cao wrote:

> Date: Wed, 16 Apr 2014 17:15:13 -0700
> From: mingming cao <mingming@...cle.com>
> To: Lukas Czerner <lczerner@...hat.com>
> Cc: linux-ext4@...r.kernel.org
> Newsgroups: gmane.comp.file-systems.ext4
> Subject: Re: [PATCH 2/2] ext4: Rename uninitialized extents to unwritten
> 
> On 04/10/2014 10:14 AM, Lukas Czerner wrote:
> > Currently in ext4 there is quite a mess when it comes to naming
> > unwritten extents. Sometimes we call them uninitialized and sometimes we
> > refer to them as unwritten.
> > 
> > The right name for an extent which has been allocated but does not
> > contain any written data is _unwritten_. Other file systems use
> > this name consistently; even the buffer head state refers to it as
> > unwritten. We need to fix this confusion in ext4.
> > 
> > This commit changes every reference to an uninitialized extent (meaning
> > allocated but unwritten) to an unwritten extent. This includes comments,
> > function names and variable names. It even covers abbreviations of the
> > word uninitialized (such as uninit) and some misspellings.
> > 
> > This commit does not change any of the code paths at all. This has been
> > confirmed by comparing md5sums of the assembly code of each object file
> > after all the function names were stripped from it.
> > 
> > Signed-off-by: Lukas Czerner <lczerner@...hat.com>
> 
> 
> Some time back I thought an unwritten extent meant one that is preallocated
> on disk but has valid data in the page cache that is yet to be written to
> disk, whereas an uninitialized extent meant one that is just preallocated,
> with no valid data on disk or in memory. It seems that distinction no longer
> stands. Really nice to get rid of the confusion between these two terms. Thanks!
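
[Illustration, not part of the patch or of the original mail: the user-visible
behaviour behind both terms is what fallocate(2) creates. A minimal sketch,
assuming a Linux system with the file on ext4; the file name "testfile" and the
sizes are arbitrary.]

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16] = { 0 };
	int fd = open("testfile", O_CREAT | O_RDWR | O_TRUNC, 0644);

	if (fd < 0)
		return 1;

	/* Preallocate 1 MiB without changing i_size: the blocks are
	 * allocated on disk but the extent is marked unwritten, so no
	 * stale on-disk data can ever be exposed through reads. */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) != 0)
		perror("fallocate");

	/* Writing into the range later converts that part of the
	 * unwritten extent into a written (initialized) one. */
	if (pwrite(fd, "data", 4, 0) != 4)
		perror("pwrite");
	if (pread(fd, buf, sizeof(buf), 0) < 0)
		perror("pread");
	printf("first bytes: %.4s\n", buf);

	close(fd);
	return 0;
}
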
> 
> In the meantime, shall we rename all the places that reference "initialized
> extents" to "written extents"?

Hi Mingming,

I think that we're using "initialized" extents consistently
throughout the ext4 code, and I do not think that it's causing any
confusion, so I do not feel that we really need to change it.
However, if people think that we should change it, I can prepare
patches; that should be easy enough.

Thanks!
-Lukas

> 
> Regards,
> Mingming
> > ---
> >   fs/ext4/ext4.h              |  16 ++--
> >   fs/ext4/ext4_extents.h      |  22 ++---
> >   fs/ext4/extents.c           | 218 ++++++++++++++++++++++----------------------
> >   fs/ext4/extents_status.c    |   2 +-
> >   fs/ext4/file.c              |   2 +-
> >   fs/ext4/inode.c             |  18 ++--
> >   fs/ext4/move_extent.c       |  38 ++++----
> >   fs/ext4/super.c             |   2 +-
> >   include/trace/events/ext4.h |   8 +-
> >   9 files changed, 163 insertions(+), 163 deletions(-)
> > 
> > diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
> > index b731538..71d0184 100644
> > --- a/fs/ext4/ext4.h
> > +++ b/fs/ext4/ext4.h
> > @@ -183,7 +183,7 @@ struct ext4_map_blocks {
> >   #define	EXT4_IO_END_UNWRITTEN	0x0001
> > 
> >   /*
> > - * For converting uninitialized extents on a work queue. 'handle' is used for
> > + * For converting unwritten extents on a work queue. 'handle' is used for
> >    * buffered writeback.
> >    */
> >   typedef struct ext4_io_end {
> > @@ -536,26 +536,26 @@ enum {
> >   /*
> >    * Flags used by ext4_map_blocks()
> >    */
> > -	/* Allocate any needed blocks and/or convert an unitialized
> > +	/* Allocate any needed blocks and/or convert an unwritten
> >   	   extent to be an initialized ext4 */
> >   #define EXT4_GET_BLOCKS_CREATE			0x0001
> > -	/* Request the creation of an unitialized extent */
> > -#define EXT4_GET_BLOCKS_UNINIT_EXT		0x0002
> > -#define EXT4_GET_BLOCKS_CREATE_UNINIT_EXT	(EXT4_GET_BLOCKS_UNINIT_EXT|\
> > +	/* Request the creation of an unwritten extent */
> > +#define EXT4_GET_BLOCKS_UNWRIT_EXT		0x0002
> > +#define EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT	(EXT4_GET_BLOCKS_UNWRIT_EXT|\
> >   						 EXT4_GET_BLOCKS_CREATE)
> >   	/* Caller is from the delayed allocation writeout path
> >   	 * finally doing the actual allocation of delayed blocks */
> >   #define EXT4_GET_BLOCKS_DELALLOC_RESERVE	0x0004
> >   	/* caller is from the direct IO path, request to creation of an
> > -	unitialized extents if not allocated, split the uninitialized
> > +	unwritten extents if not allocated, split the unwritten
> >   	extent if blocks has been preallocated already*/
> >   #define EXT4_GET_BLOCKS_PRE_IO			0x0008
> >   #define EXT4_GET_BLOCKS_CONVERT			0x0010
> >   #define EXT4_GET_BLOCKS_IO_CREATE_EXT		(EXT4_GET_BLOCKS_PRE_IO|\
> > -					 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
> > +					 EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT)
> >   	/* Convert extent to initialized after IO complete */
> >   #define EXT4_GET_BLOCKS_IO_CONVERT_EXT	(EXT4_GET_BLOCKS_CONVERT|\
> > -					 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
> > +					 EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT)
> >   	/* Eventual metadata allocation (due to growing extent tree)
> >   	 * should not fail, so try to use reserved blocks for that.*/
> >   #define EXT4_GET_BLOCKS_METADATA_NOFAIL		0x0020
> > diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
> > index 5074fe2..a867f5c 100644
> > --- a/fs/ext4/ext4_extents.h
> > +++ b/fs/ext4/ext4_extents.h
> > @@ -137,21 +137,21 @@ struct ext4_ext_path {
> >    * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an
> >    * initialized extent. This is 2^15 and not (2^16 - 1), since we use the
> >    * MSB of ee_len field in the extent datastructure to signify if this
> > - * particular extent is an initialized extent or an uninitialized (i.e.
> > + * particular extent is an initialized extent or an unwritten (i.e.
> >    * preallocated).
> > - * EXT_UNINIT_MAX_LEN is the maximum number of blocks we can have in an
> > - * uninitialized extent.
> > + * EXT_UNWRITTEN_MAX_LEN is the maximum number of blocks we can have in an
> > + * unwritten extent.
> >    * If ee_len is <= 0x8000, it is an initialized extent. Otherwise, it is an
> > - * uninitialized one. In other words, if MSB of ee_len is set, it is an
> > - * uninitialized extent with only one special scenario when ee_len = 0x8000.
> > - * In this case we can not have an uninitialized extent of zero length and
> > + * unwritten one. In other words, if MSB of ee_len is set, it is an
> > + * unwritten extent with only one special scenario when ee_len = 0x8000.
> > + * In this case we can not have an unwritten extent of zero length and
> >    * thus we make it as a special case of initialized extent with 0x8000 length.
> >    * This way we get better extent-to-group alignment for initialized extents.
> >    * Hence, the maximum number of blocks we can have in an *initialized*
> > - * extent is 2^15 (32768) and in an *uninitialized* extent is 2^15-1 (32767).
> > + * extent is 2^15 (32768) and in an *unwritten* extent is 2^15-1 (32767).
> >    */
> >   #define EXT_INIT_MAX_LEN	(1UL << 15)
> > -#define EXT_UNINIT_MAX_LEN	(EXT_INIT_MAX_LEN - 1)
> > +#define EXT_UNWRITTEN_MAX_LEN	(EXT_INIT_MAX_LEN - 1)
> > 
> > 
> >   #define EXT_FIRST_EXTENT(__hdr__) \
> > @@ -187,14 +187,14 @@ static inline unsigned short ext_depth(struct inode *inode)
> >   	return le16_to_cpu(ext_inode_hdr(inode)->eh_depth);
> >   }
> > 
> > -static inline void ext4_ext_mark_uninitialized(struct ext4_extent *ext)
> > +static inline void ext4_ext_mark_unwritten(struct ext4_extent *ext)
> >   {
> > -	/* We can not have an uninitialized extent of zero length! */
> > +	/* We can not have an unwritten extent of zero length! */
> >   	BUG_ON((le16_to_cpu(ext->ee_len) & ~EXT_INIT_MAX_LEN) == 0);
> >   	ext->ee_len |= cpu_to_le16(EXT_INIT_MAX_LEN);
> >   }
> > 
> > -static inline int ext4_ext_is_uninitialized(struct ext4_extent *ext)
> > +static inline int ext4_ext_is_unwritten(struct ext4_extent *ext)
> >   {
> >   	/* Extent with ee_len of 0x8000 is treated as an initialized extent */
> >   	return (le16_to_cpu(ext->ee_len) > EXT_INIT_MAX_LEN);
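
[Illustration, not a hunk from this patch: the length decode that pairs with the
helpers above handles the ee_len == 0x8000 special case described in the comment.
A sketch of ext4_ext_get_actual_len(), written from memory, so details may differ
from the tree:]

static inline int ext4_ext_get_actual_len(struct ext4_extent *ext)
{
	/* ee_len <= 0x8000: an initialized extent, and the value is the
	 * length itself (0x8000 being the special 32768-block case).
	 * ee_len > 0x8000: an unwritten extent, and the real length is
	 * ee_len - 0x8000, i.e. at most 32767 blocks. */
	return (le16_to_cpu(ext->ee_len) <= EXT_INIT_MAX_LEN ?
		le16_to_cpu(ext->ee_len) :
		(le16_to_cpu(ext->ee_len) - EXT_INIT_MAX_LEN));
}
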
> > diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
> > index 299eb72..89f2227 100644
> > --- a/fs/ext4/extents.c
> > +++ b/fs/ext4/extents.c
> > @@ -50,8 +50,8 @@
> >    */
> >   #define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
> >   					due to ENOSPC */
> > -#define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
> > -#define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */
> > +#define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
> > +#define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */
> > 
> >   #define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
> >   #define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */
> > @@ -524,7 +524,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
> >   						     lblk - prev, ~0,
> >   						     EXTENT_STATUS_HOLE);
> > 
> > -			if (ext4_ext_is_uninitialized(ex))
> > +			if (ext4_ext_is_unwritten(ex))
> >   				status = EXTENT_STATUS_UNWRITTEN;
> >   			ext4_es_cache_extent(inode, lblk, len,
> >   					     ext4_ext_pblock(ex), status);
> > @@ -620,7 +620,7 @@ static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
> >   		} else if (path->p_ext) {
> >   			ext_debug("  %d:[%d]%d:%llu ",
> >   				  le32_to_cpu(path->p_ext->ee_block),
> > -				  ext4_ext_is_uninitialized(path->p_ext),
> > +				  ext4_ext_is_unwritten(path->p_ext),
> >   				  ext4_ext_get_actual_len(path->p_ext),
> >   				  ext4_ext_pblock(path->p_ext));
> >   		} else
> > @@ -646,7 +646,7 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
> > 
> >   	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
> >   		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
> > -			  ext4_ext_is_uninitialized(ex),
> > +			  ext4_ext_is_unwritten(ex),
> >   			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
> >   	}
> >   	ext_debug("\n");
> > @@ -677,7 +677,7 @@ static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
> >   		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
> >   				le32_to_cpu(ex->ee_block),
> >   				ext4_ext_pblock(ex),
> > -				ext4_ext_is_uninitialized(ex),
> > +				ext4_ext_is_unwritten(ex),
> >   				ext4_ext_get_actual_len(ex),
> >   				newblock);
> >   		ex++;
> > @@ -802,7 +802,7 @@ ext4_ext_binsearch(struct inode *inode,
> >   	ext_debug("  -> %d:%llu:[%d]%d ",
> >   			le32_to_cpu(path->p_ext->ee_block),
> >   			ext4_ext_pblock(path->p_ext),
> > -			ext4_ext_is_uninitialized(path->p_ext),
> > +			ext4_ext_is_unwritten(path->p_ext),
> >   			ext4_ext_get_actual_len(path->p_ext));
> > 
> >   #ifdef CHECK_BINSEARCH
> > @@ -1686,11 +1686,11 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
> > 
> >   	/*
> >   	 * Make sure that both extents are initialized. We don't merge
> > -	 * uninitialized extents so that we can be sure that end_io code has
> > +	 * unwritten extents so that we can be sure that end_io code has
> >   	 * the extent that was written properly split out and conversion to
> >   	 * initialized is trivial.
> >   	 */
> > -	if (ext4_ext_is_uninitialized(ex1) != ext4_ext_is_uninitialized(ex2))
> > +	if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
> >   		return 0;
> > 
> >   	ext1_ee_len = ext4_ext_get_actual_len(ex1);
> > @@ -1707,10 +1707,10 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
> >   	 */
> >   	if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
> >   		return 0;
> > -	if (ext4_ext_is_uninitialized(ex1) &&
> > +	if (ext4_ext_is_unwritten(ex1) &&
> >   	    (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) ||
> >   	     atomic_read(&EXT4_I(inode)->i_unwritten) ||
> > -	     (ext1_ee_len + ext2_ee_len > EXT_UNINIT_MAX_LEN)))
> > +	     (ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)))
> >   		return 0;
> >   #ifdef AGGRESSIVE_TEST
> >   	if (ext1_ee_len >= 4)
> > @@ -1735,7 +1735,7 @@ static int ext4_ext_try_to_merge_right(struct inode *inode,
> >   {
> >   	struct ext4_extent_header *eh;
> >   	unsigned int depth, len;
> > -	int merge_done = 0, uninit;
> > +	int merge_done = 0, unwritten;
> > 
> >   	depth = ext_depth(inode);
> >   	BUG_ON(path[depth].p_hdr == NULL);
> > @@ -1745,11 +1745,11 @@ static int ext4_ext_try_to_merge_right(struct inode *inode,
> >   		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
> >   			break;
> >   		/* merge with next extent! */
> > -		uninit = ext4_ext_is_uninitialized(ex);
> > +		unwritten = ext4_ext_is_unwritten(ex);
> >   		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
> >   				+ ext4_ext_get_actual_len(ex + 1));
> > -		if (uninit)
> > -			ext4_ext_mark_uninitialized(ex);
> > +		if (unwritten)
> > +			ext4_ext_mark_unwritten(ex);
> > 
> >   		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
> >   			len = (EXT_LAST_EXTENT(eh) - ex - 1)
> > @@ -1903,7 +1903,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
> >   	struct ext4_ext_path *npath = NULL;
> >   	int depth, len, err;
> >   	ext4_lblk_t next;
> > -	int mb_flags = 0, uninit;
> > +	int mb_flags = 0, unwritten;
> > 
> >   	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
> >   		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
> > @@ -1943,21 +1943,21 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
> >   		if (ext4_can_extents_be_merged(inode, ex, newext)) {
> >   			ext_debug("append [%d]%d block to %u:[%d]%d"
> >   				  "(from %llu)\n",
> > -				  ext4_ext_is_uninitialized(newext),
> > +				  ext4_ext_is_unwritten(newext),
> >   				  ext4_ext_get_actual_len(newext),
> >   				  le32_to_cpu(ex->ee_block),
> > -				  ext4_ext_is_uninitialized(ex),
> > +				  ext4_ext_is_unwritten(ex),
> >   				  ext4_ext_get_actual_len(ex),
> >   				  ext4_ext_pblock(ex));
> >   			err = ext4_ext_get_access(handle, inode,
> >   						  path + depth);
> >   			if (err)
> >   				return err;
> > -			uninit = ext4_ext_is_uninitialized(ex);
> > +			unwritten = ext4_ext_is_unwritten(ex);
> >   			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
> >   					+ ext4_ext_get_actual_len(newext));
> > -			if (uninit)
> > -				ext4_ext_mark_uninitialized(ex);
> > +			if (unwritten)
> > +				ext4_ext_mark_unwritten(ex);
> >   			eh = path[depth].p_hdr;
> >   			nearex = ex;
> >   			goto merge;
> > @@ -1969,10 +1969,10 @@ prepend:
> >   			ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
> >   				  "(from %llu)\n",
> >   				  le32_to_cpu(newext->ee_block),
> > -				  ext4_ext_is_uninitialized(newext),
> > +				  ext4_ext_is_unwritten(newext),
> >   				  ext4_ext_get_actual_len(newext),
> >   				  le32_to_cpu(ex->ee_block),
> > -				  ext4_ext_is_uninitialized(ex),
> > +				  ext4_ext_is_unwritten(ex),
> >   				  ext4_ext_get_actual_len(ex),
> >   				  ext4_ext_pblock(ex));
> >   			err = ext4_ext_get_access(handle, inode,
> > @@ -1980,13 +1980,13 @@ prepend:
> >   			if (err)
> >   				return err;
> > 
> > -			uninit = ext4_ext_is_uninitialized(ex);
> > +			unwritten = ext4_ext_is_unwritten(ex);
> >   			ex->ee_block = newext->ee_block;
> >   			ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
> >   			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
> >   					+ ext4_ext_get_actual_len(newext));
> > -			if (uninit)
> > -				ext4_ext_mark_uninitialized(ex);
> > +			if (unwritten)
> > +				ext4_ext_mark_unwritten(ex);
> >   			eh = path[depth].p_hdr;
> >   			nearex = ex;
> >   			goto merge;
> > @@ -2046,7 +2046,7 @@ has_space:
> >   		ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
> >   				le32_to_cpu(newext->ee_block),
> >   				ext4_ext_pblock(newext),
> > -				ext4_ext_is_uninitialized(newext),
> > +				ext4_ext_is_unwritten(newext),
> >   				ext4_ext_get_actual_len(newext));
> >   		nearex = EXT_FIRST_EXTENT(eh);
> >   	} else {
> > @@ -2057,7 +2057,7 @@ has_space:
> >   					"nearest %p\n",
> >   					le32_to_cpu(newext->ee_block),
> >   					ext4_ext_pblock(newext),
> > -					ext4_ext_is_uninitialized(newext),
> > +					ext4_ext_is_unwritten(newext),
> >   					ext4_ext_get_actual_len(newext),
> >   					nearex);
> >   			nearex++;
> > @@ -2068,7 +2068,7 @@ has_space:
> >   					"nearest %p\n",
> >   					le32_to_cpu(newext->ee_block),
> >   					ext4_ext_pblock(newext),
> > -					ext4_ext_is_uninitialized(newext),
> > +					ext4_ext_is_unwritten(newext),
> >   					ext4_ext_get_actual_len(newext),
> >   					nearex);
> >   		}
> > @@ -2078,7 +2078,7 @@ has_space:
> >   					"move %d extents from 0x%p to 0x%p\n",
> >   					le32_to_cpu(newext->ee_block),
> >   					ext4_ext_pblock(newext),
> > -					ext4_ext_is_uninitialized(newext),
> > +					ext4_ext_is_unwritten(newext),
> >   					ext4_ext_get_actual_len(newext),
> >   					len, nearex, nearex + 1);
> >   			memmove(nearex + 1, nearex,
> > @@ -2200,7 +2200,7 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
> >   			es.es_lblk = le32_to_cpu(ex->ee_block);
> >   			es.es_len = ext4_ext_get_actual_len(ex);
> >   			es.es_pblk = ext4_ext_pblock(ex);
> > -			if (ext4_ext_is_uninitialized(ex))
> > +			if (ext4_ext_is_unwritten(ex))
> >   				flags |= FIEMAP_EXTENT_UNWRITTEN;
> >   		}
> > 
> > @@ -2576,7 +2576,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
> >   	unsigned num;
> >   	ext4_lblk_t ex_ee_block;
> >   	unsigned short ex_ee_len;
> > -	unsigned uninitialized = 0;
> > +	unsigned unwritten = 0;
> >   	struct ext4_extent *ex;
> >   	ext4_fsblk_t pblk;
> > 
> > @@ -2623,13 +2623,13 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
> >   	while (ex >= EXT_FIRST_EXTENT(eh) &&
> >   			ex_ee_block + ex_ee_len > start) {
> > 
> > -		if (ext4_ext_is_uninitialized(ex))
> > -			uninitialized = 1;
> > +		if (ext4_ext_is_unwritten(ex))
> > +			unwritten = 1;
> >   		else
> > -			uninitialized = 0;
> > +			unwritten = 0;
> > 
> >   		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
> > -			 uninitialized, ex_ee_len);
> > +			  unwritten, ex_ee_len);
> >   		path[depth].p_ext = ex;
> > 
> >   		a = ex_ee_block > start ? ex_ee_block : start;
> > @@ -2701,11 +2701,11 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
> > 
> >   		ex->ee_len = cpu_to_le16(num);
> >   		/*
> > -		 * Do not mark uninitialized if all the blocks in the
> > +		 * Do not mark unwritten if all the blocks in the
> >   		 * extent have been removed.
> >   		 */
> > -		if (uninitialized && num)
> > -			ext4_ext_mark_uninitialized(ex);
> > +		if (unwritten && num)
> > +			ext4_ext_mark_unwritten(ex);
> >   		/*
> >   		 * If the extent was completely released,
> >   		 * we need to remove it from the leaf
> > @@ -2854,9 +2854,9 @@ again:
> >   		    end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
> >   			int split_flag = 0;
> > 
> > -			if (ext4_ext_is_uninitialized(ex))
> > -				split_flag = EXT4_EXT_MARK_UNINIT1 |
> > -					     EXT4_EXT_MARK_UNINIT2;
> > +			if (ext4_ext_is_unwritten(ex))
> > +				split_flag = EXT4_EXT_MARK_UNWRIT1 |
> > +					     EXT4_EXT_MARK_UNWRIT2;
> > 
> >   			/*
> >   			 * Split the extent in two so that 'end' is the last
> > @@ -3113,7 +3113,7 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
> >    * @path: the path to the extent
> >    * @split: the logical block where the extent is splitted.
> >    * @split_flags: indicates if the extent could be zeroout if split fails, and
> > - *		 the states(init or uninit) of new extents.
> > + *		 the states(init or unwritten) of new extents.
> >    * @flags: flags used to insert new extent to extent tree.
> >    *
> >    *
> > @@ -3155,10 +3155,10 @@ static int ext4_split_extent_at(handle_t *handle,
> >   	newblock = split - ee_block + ext4_ext_pblock(ex);
> > 
> >   	BUG_ON(split < ee_block || split >= (ee_block + ee_len));
> > -	BUG_ON(!ext4_ext_is_uninitialized(ex) &&
> > +	BUG_ON(!ext4_ext_is_unwritten(ex) &&
> >   	       split_flag & (EXT4_EXT_MAY_ZEROOUT |
> > -			     EXT4_EXT_MARK_UNINIT1 |
> > -			     EXT4_EXT_MARK_UNINIT2));
> > +			     EXT4_EXT_MARK_UNWRIT1 |
> > +			     EXT4_EXT_MARK_UNWRIT2));
> > 
> >   	err = ext4_ext_get_access(handle, inode, path + depth);
> >   	if (err)
> > @@ -3170,8 +3170,8 @@ static int ext4_split_extent_at(handle_t *handle,
> >   		 * then we just change the state of the extent, and splitting
> >   		 * is not needed.
> >   		 */
> > -		if (split_flag & EXT4_EXT_MARK_UNINIT2)
> > -			ext4_ext_mark_uninitialized(ex);
> > +		if (split_flag & EXT4_EXT_MARK_UNWRIT2)
> > +			ext4_ext_mark_unwritten(ex);
> >   		else
> >   			ext4_ext_mark_initialized(ex);
> > 
> > @@ -3185,8 +3185,8 @@ static int ext4_split_extent_at(handle_t *handle,
> >   	/* case a */
> >   	memcpy(&orig_ex, ex, sizeof(orig_ex));
> >   	ex->ee_len = cpu_to_le16(split - ee_block);
> > -	if (split_flag & EXT4_EXT_MARK_UNINIT1)
> > -		ext4_ext_mark_uninitialized(ex);
> > +	if (split_flag & EXT4_EXT_MARK_UNWRIT1)
> > +		ext4_ext_mark_unwritten(ex);
> > 
> >   	/*
> >   	 * path may lead to new leaf, not to original leaf any more
> > @@ -3200,8 +3200,8 @@ static int ext4_split_extent_at(handle_t *handle,
> >   	ex2->ee_block = cpu_to_le32(split);
> >   	ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
> >   	ext4_ext_store_pblock(ex2, newblock);
> > -	if (split_flag & EXT4_EXT_MARK_UNINIT2)
> > -		ext4_ext_mark_uninitialized(ex2);
> > +	if (split_flag & EXT4_EXT_MARK_UNWRIT2)
> > +		ext4_ext_mark_unwritten(ex2);
> > 
> >   	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
> >   	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
> > @@ -3278,7 +3278,7 @@ static int ext4_split_extent(handle_t *handle,
> >   	struct ext4_extent *ex;
> >   	unsigned int ee_len, depth;
> >   	int err = 0;
> > -	int uninitialized;
> > +	int unwritten;
> >   	int split_flag1, flags1;
> >   	int allocated = map->m_len;
> > 
> > @@ -3286,14 +3286,14 @@ static int ext4_split_extent(handle_t *handle,
> >   	ex = path[depth].p_ext;
> >   	ee_block = le32_to_cpu(ex->ee_block);
> >   	ee_len = ext4_ext_get_actual_len(ex);
> > -	uninitialized = ext4_ext_is_uninitialized(ex);
> > +	unwritten = ext4_ext_is_unwritten(ex);
> > 
> >   	if (map->m_lblk + map->m_len < ee_block + ee_len) {
> >   		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
> >   		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
> > -		if (uninitialized)
> > -			split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
> > -				       EXT4_EXT_MARK_UNINIT2;
> > +		if (unwritten)
> > +			split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
> > +				       EXT4_EXT_MARK_UNWRIT2;
> >   		if (split_flag & EXT4_EXT_DATA_VALID2)
> >   			split_flag1 |= EXT4_EXT_DATA_VALID1;
> >   		err = ext4_split_extent_at(handle, inode, path,
> > @@ -3313,15 +3313,15 @@ static int ext4_split_extent(handle_t *handle,
> >   		return PTR_ERR(path);
> >   	depth = ext_depth(inode);
> >   	ex = path[depth].p_ext;
> > -	uninitialized = ext4_ext_is_uninitialized(ex);
> > +	unwritten = ext4_ext_is_unwritten(ex);
> >   	split_flag1 = 0;
> > 
> >   	if (map->m_lblk >= ee_block) {
> >   		split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
> > -		if (uninitialized) {
> > -			split_flag1 |= EXT4_EXT_MARK_UNINIT1;
> > +		if (unwritten) {
> > +			split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
> >   			split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
> > -						     EXT4_EXT_MARK_UNINIT2);
> > +						     EXT4_EXT_MARK_UNWRIT2);
> >   		}
> >   		err = ext4_split_extent_at(handle, inode, path,
> >   				map->m_lblk, split_flag1, flags);
> > @@ -3336,16 +3336,16 @@ out:
> > 
> >   /*
> >    * This function is called by ext4_ext_map_blocks() if someone tries to write
> > - * to an uninitialized extent. It may result in splitting the uninitialized
> > + * to an unwritten extent. It may result in splitting the unwritten
> >    * extent into multiple extents (up to three - one initialized and two
> > - * uninitialized).
> > + * unwritten).
> >    * There are three possibilities:
> >    *   a> There is no split required: Entire extent should be initialized
> >    *   b> Splits in two extents: Write is happening at either end of the extent
> >    *   c> Splits in three extents: Somone is writing in middle of the extent
> >    *
> >    * Pre-conditions:
> > - *  - The extent pointed to by 'path' is uninitialized.
> > + *  - The extent pointed to by 'path' is unwritten.
> >    *  - The extent pointed to by 'path' contains a superset
> >    *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
> >    *
> > @@ -3391,12 +3391,12 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
> >   	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
> > 
> >   	/* Pre-conditions */
> > -	BUG_ON(!ext4_ext_is_uninitialized(ex));
> > +	BUG_ON(!ext4_ext_is_unwritten(ex));
> >   	BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
> > 
> >   	/*
> >   	 * Attempt to transfer newly initialized blocks from the currently
> > -	 * uninitialized extent to its neighbor. This is much cheaper
> > +	 * unwritten extent to its neighbor. This is much cheaper
> >   	 * than an insertion followed by a merge as those involve costly
> >   	 * memmove() calls. Transferring to the left is the common case in
> >   	 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
> > @@ -3432,7 +3432,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
> >   		 * - C4: abut_ex can receive the additional blocks without
> >   		 *   overflowing the (initialized) length limit.
> >   		 */
> > -		if ((!ext4_ext_is_uninitialized(abut_ex)) &&		/*C1*/
> > +		if ((!ext4_ext_is_unwritten(abut_ex)) &&		/*C1*/
> >   			((prev_lblk + prev_len) == ee_block) &&		/*C2*/
> >   			((prev_pblk + prev_len) == ee_pblk) &&		/*C3*/
> >   			(prev_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
> > @@ -3447,7 +3447,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
> >   			ex->ee_block = cpu_to_le32(ee_block + map_len);
> >   			ext4_ext_store_pblock(ex, ee_pblk + map_len);
> >   			ex->ee_len = cpu_to_le16(ee_len - map_len);
> > -			ext4_ext_mark_uninitialized(ex); /* Restore the flag */
> > +			ext4_ext_mark_unwritten(ex); /* Restore the flag */
> > 
> >   			/* Extend abut_ex by 'map_len' blocks */
> >   			abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
> > @@ -3478,7 +3478,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
> >   		 * - C4: abut_ex can receive the additional blocks without
> >   		 *   overflowing the (initialized) length limit.
> >   		 */
> > -		if ((!ext4_ext_is_uninitialized(abut_ex)) &&		/*C1*/
> > +		if ((!ext4_ext_is_unwritten(abut_ex)) &&		/*C1*/
> >   		    ((map->m_lblk + map_len) == next_lblk) &&		/*C2*/
> >   		    ((ee_pblk + ee_len) == next_pblk) &&		/*C3*/
> >   		    (next_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
> > @@ -3493,7 +3493,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
> >   			abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
> >   			ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
> >   			ex->ee_len = cpu_to_le16(ee_len - map_len);
> > -			ext4_ext_mark_uninitialized(ex); /* Restore the flag */
> > +			ext4_ext_mark_unwritten(ex); /* Restore the flag */
> > 
> >   			/* Extend abut_ex by 'map_len' blocks */
> >   			abut_ex->ee_len = cpu_to_le16(next_len + map_len);
> > @@ -3598,26 +3598,26 @@ out:
> >   /*
> >    * This function is called by ext4_ext_map_blocks() from
> >    * ext4_get_blocks_dio_write() when DIO to write
> > - * to an uninitialized extent.
> > + * to an unwritten extent.
> >    *
> > - * Writing to an uninitialized extent may result in splitting the uninitialized
> > - * extent into multiple initialized/uninitialized extents (up to three)
> > + * Writing to an unwritten extent may result in splitting the unwritten
> > + * extent into multiple initialized/unwritten extents (up to three)
> >    * There are three possibilities:
> > - *   a> There is no split required: Entire extent should be uninitialized
> > + *   a> There is no split required: Entire extent should be unwritten
> >    *   b> Splits in two extents: Write is happening at either end of the extent
> >    *   c> Splits in three extents: Somone is writing in middle of the extent
> >    *
> >    * This works the same way in the case of initialized -> unwritten conversion.
> >    *
> >    * One of more index blocks maybe needed if the extent tree grow after
> > - * the uninitialized extent split. To prevent ENOSPC occur at the IO
> > - * complete, we need to split the uninitialized extent before DIO submit
> > - * the IO. The uninitialized extent called at this time will be split
> > - * into three uninitialized extent(at most). After IO complete, the part
> > + * the unwritten extent split. To prevent ENOSPC occur at the IO
> > + * complete, we need to split the unwritten extent before DIO submit
> > + * the IO. The unwritten extent called at this time will be split
> > + * into three unwritten extent(at most). After IO complete, the part
> >    * being filled will be convert to initialized by the end_io callback function
> >    * via ext4_convert_unwritten_extents().
> >    *
> > - * Returns the size of uninitialized extent to be written on success.
> > + * Returns the size of unwritten extent to be written on success.
> >    */
> >   static int ext4_split_convert_extents(handle_t *handle,
> >   					struct inode *inode,
> > @@ -3655,7 +3655,7 @@ static int ext4_split_convert_extents(handle_t *handle,
> >   	} else if (flags & EXT4_GET_BLOCKS_CONVERT) {
> >   		split_flag |= ee_block + ee_len <= eof_block ?
> >   			      EXT4_EXT_MAY_ZEROOUT : 0;
> > -		split_flag |= (EXT4_EXT_MARK_UNINIT2 | EXT4_EXT_DATA_VALID2);
> > +		split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
> >   	}
> >   	flags |= EXT4_GET_BLOCKS_PRE_IO;
> >   	return ext4_split_extent(handle, inode, path, map, split_flag, flags);
> > @@ -3699,8 +3699,8 @@ static int ext4_convert_initialized_extents(handle_t *handle,
> >   	err = ext4_ext_get_access(handle, inode, path + depth);
> >   	if (err)
> >   		goto out;
> > -	/* first mark the extent as uninitialized */
> > -	ext4_ext_mark_uninitialized(ex);
> > +	/* first mark the extent as unwritten */
> > +	ext4_ext_mark_unwritten(ex);
> > 
> >   	/* note: ext4_ext_correct_indexes() isn't needed here because
> >   	 * borders are not changed
> > @@ -3960,10 +3960,10 @@ ext4_ext_convert_initialized_extent(handle_t *handle, struct inode *inode,
> > 
> >   	/*
> >   	 * Make sure that the extent is no bigger than we support with
> > -	 * uninitialized extent
> > +	 * unwritten extent
> >   	 */
> > -	if (map->m_len > EXT_UNINIT_MAX_LEN)
> > -		map->m_len = EXT_UNINIT_MAX_LEN / 2;
> > +	if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
> > +		map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
> > 
> >   	ret = ext4_convert_initialized_extents(handle, inode, map,
> >   						path);
> > @@ -3982,7 +3982,7 @@ ext4_ext_convert_initialized_extent(handle_t *handle, struct inode *inode,
> >   }
> > 
> >   static int
> > -ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
> > +ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
> >   			struct ext4_map_blocks *map,
> >   			struct ext4_ext_path *path, int flags,
> >   			unsigned int allocated, ext4_fsblk_t newblock)
> > @@ -3991,19 +3991,19 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
> >   	int err = 0;
> >   	ext4_io_end_t *io = ext4_inode_aio(inode);
> > 
> > -	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
> > +	ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical "
> >   		  "block %llu, max_blocks %u, flags %x, allocated %u\n",
> >   		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
> >   		  flags, allocated);
> >   	ext4_ext_show_leaf(inode, path);
> > 
> >   	/*
> > -	 * When writing into uninitialized space, we should not fail to
> > +	 * When writing into unwritten space, we should not fail to
> >   	 * allocate metadata blocks for the new extent block if needed.
> >   	 */
> >   	flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
> > 
> > -	trace_ext4_ext_handle_uninitialized_extents(inode, map, flags,
> > +	trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
> >   						    allocated, newblock);
> > 
> >   	/* get_block() before submit the IO, split the extent */
> > @@ -4046,7 +4046,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
> >   	 * repeat fallocate creation request
> >   	 * we already have an unwritten extent
> >   	 */
> > -	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
> > +	if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
> >   		map->m_flags |= EXT4_MAP_UNWRITTEN;
> >   		goto map_out;
> >   	}
> > @@ -4297,7 +4297,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
> > 
> > 
> >   		/*
> > -		 * Uninitialized extents are treated as holes, except that
> > +		 * unwritten extents are treated as holes, except that
> >   		 * we split out initialized portions during a write.
> >   		 */
> >   		ee_len = ext4_ext_get_actual_len(ex);
> > @@ -4316,16 +4316,16 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
> >   			 * If the extent is initialized check whether the
> >   			 * caller wants to convert it to unwritten.
> >   			 */
> > -			if ((!ext4_ext_is_uninitialized(ex)) &&
> > +			if ((!ext4_ext_is_unwritten(ex)) &&
> >   			    (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
> >   				allocated = ext4_ext_convert_initialized_extent(
> >   						handle, inode, map, path, flags,
> >   						allocated, newblock);
> >   				goto out2;
> > -			} else if (!ext4_ext_is_uninitialized(ex))
> > +			} else if (!ext4_ext_is_unwritten(ex))
> >   				goto out;
> > 
> > -			ret = ext4_ext_handle_uninitialized_extents(
> > +			ret = ext4_ext_handle_unwritten_extents(
> >   				handle, inode, map, path, flags,
> >   				allocated, newblock);
> >   			if (ret < 0)
> > @@ -4397,15 +4397,15 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
> >   	/*
> >   	 * See if request is beyond maximum number of blocks we can have in
> >   	 * a single extent. For an initialized extent this limit is
> > -	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
> > -	 * EXT_UNINIT_MAX_LEN.
> > +	 * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
> > +	 * EXT_UNWRITTEN_MAX_LEN.
> >   	 */
> >   	if (map->m_len > EXT_INIT_MAX_LEN &&
> > -	    !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
> > +	    !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
> >   		map->m_len = EXT_INIT_MAX_LEN;
> > -	else if (map->m_len > EXT_UNINIT_MAX_LEN &&
> > -		 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
> > -		map->m_len = EXT_UNINIT_MAX_LEN;
> > +	else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
> > +		 (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
> > +		map->m_len = EXT_UNWRITTEN_MAX_LEN;
> > 
> >   	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
> >   	newex.ee_len = cpu_to_le16(map->m_len);
> > @@ -4453,13 +4453,13 @@ got_allocated_blocks:
> >   	/* try to insert new extent into found leaf and return */
> >   	ext4_ext_store_pblock(&newex, newblock + offset);
> >   	newex.ee_len = cpu_to_le16(ar.len);
> > -	/* Mark uninitialized */
> > -	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){
> > -		ext4_ext_mark_uninitialized(&newex);
> > +	/* Mark unwritten */
> > +	if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT){
> > +		ext4_ext_mark_unwritten(&newex);
> >   		map->m_flags |= EXT4_MAP_UNWRITTEN;
> >   		/*
> >   		 * io_end structure was created for every IO write to an
> > -		 * uninitialized extent. To avoid unnecessary conversion,
> > +		 * unwritten extent. To avoid unnecessary conversion,
> >   		 * here we flag the IO that really needs the conversion.
> >   		 * For non asycn direct IO case, flag the inode state
> >   		 * that we need to perform conversion when IO is done.
> > @@ -4592,9 +4592,9 @@ got_allocated_blocks:
> > 
> >   	/*
> >   	 * Cache the extent and update transaction to commit on fdatasync only
> > -	 * when it is _not_ an uninitialized extent.
> > +	 * when it is _not_ an unwritten extent.
> >   	 */
> > -	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0)
> > +	if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
> >   		ext4_update_inode_fsync_trans(handle, inode, 1);
> >   	else
> >   		ext4_update_inode_fsync_trans(handle, inode, 0);
> > @@ -4668,7 +4668,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
> >   	 * that it doesn't get unnecessarily split into multiple
> >   	 * extents.
> >   	 */
> > -	if (len <= EXT_UNINIT_MAX_LEN)
> > +	if (len <= EXT_UNWRITTEN_MAX_LEN)
> >   		flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
> > 
> >   	/*
> > @@ -4757,7 +4757,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
> >   	else
> >   		max_blocks -= lblk;
> > 
> > -	flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT |
> > +	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT |
> >   		EXT4_GET_BLOCKS_CONVERT_UNWRITTEN;
> >   	if (mode & FALLOC_FL_KEEP_SIZE)
> >   		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
> > @@ -4900,7 +4900,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
> >   	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
> >   		- lblk;
> > 
> > -	flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
> > +	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
> >   	if (mode & FALLOC_FL_KEEP_SIZE)
> >   		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
> > 
> > diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
> > index 0a014a7..33682aa 100644
> > --- a/fs/ext4/extents_status.c
> > +++ b/fs/ext4/extents_status.c
> > @@ -433,7 +433,7 @@ static void ext4_es_insert_extent_ext_check(struct inode *inode,
> >   		ee_start = ext4_ext_pblock(ex);
> >   		ee_len = ext4_ext_get_actual_len(ex);
> > 
> > -		ee_status = ext4_ext_is_uninitialized(ex) ? 1 : 0;
> > +		ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
> >   		es_status = ext4_es_is_unwritten(es) ? 1 : 0;
> > 
> >   		/*
> > diff --git a/fs/ext4/file.c b/fs/ext4/file.c
> > index 4e508fc..7622867 100644
> > --- a/fs/ext4/file.c
> > +++ b/fs/ext4/file.c
> > @@ -136,7 +136,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
> >   		/*
> >   		 * 'err==len' means that all of blocks has been preallocated no
> >   		 * matter they are initialized or not.  For excluding
> > -		 * uninitialized extents, we need to check m_flags.  There are
> > +		 * unwritten extents, we need to check m_flags.  There are
> >   		 * two conditions that indicate for initialized extents.
> >   		 * 1) If we hit extent cache, EXT4_MAP_MAPPED flag is returned;
> >   		 * 2) If we do a real lookup, non-flags are returned.
> > diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
> > index 0432c07..1922f48 100644
> > --- a/fs/ext4/inode.c
> > +++ b/fs/ext4/inode.c
> > @@ -489,8 +489,8 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
> >    * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
> >    * based files
> >    *
> > - * On success, it returns the number of blocks being mapped or allocate.
> > - * if create==0 and the blocks are pre-allocated and uninitialized block,
> > + * On success, it returns the number of blocks being mapped or allocated.
> > + * if create==0 and the blocks are pre-allocated and unwritten block,
> >    * the result buffer head is unmapped. If the create ==1, it will make sure
> >    * the buffer head is mapped.
> >    *
> > @@ -618,7 +618,7 @@ found:
> >   	map->m_flags &= ~EXT4_MAP_FLAGS;
> > 
> >   	/*
> > -	 * New blocks allocate and/or writing to uninitialized extent
> > +	 * New blocks allocate and/or writing to unwritten extent
> >   	 * will possibly result in updating i_data, so we take
> >   	 * the write lock of i_data_sem, and call get_blocks()
> >   	 * with create == 1 flag.
> > @@ -2028,7 +2028,7 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
> >    * Scan buffers corresponding to changed extent (we expect corresponding pages
> >    * to be already locked) and update buffer state according to new extent state.
> >    * We map delalloc buffers to their physical location, clear unwritten bits,
> > - * and mark buffers as uninit when we perform writes to uninitialized extents
> > + * and mark buffers as uninit when we perform writes to unwritten extents
> >    * and do extent conversion after IO is finished. If the last page is not fully
> >    * mapped, we update @map to the next extent in the last page that needs
> >    * mapping. Otherwise we submit the page for IO.
> > @@ -2127,7 +2127,7 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
> >   	trace_ext4_da_write_pages_extent(inode, map);
> >   	/*
> >   	 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
> > -	 * to convert an uninitialized extent to be initialized (in the case
> > +	 * to convert an unwritten extent to be initialized (in the case
> >   	 * where we have written into one or more preallocated blocks).  It is
> >   	 * possible that we're going to need more metadata blocks than
> >   	 * previously reserved. However we must not fail because we're in
> > @@ -3057,9 +3057,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
> >    * preallocated extents, and those write extend the file, no need to
> >    * fall back to buffered IO.
> >    *
> > - * For holes, we fallocate those blocks, mark them as uninitialized
> > + * For holes, we fallocate those blocks, mark them as unwritten
> >    * If those blocks were preallocated, we mark sure they are split, but
> > - * still keep the range to write as uninitialized.
> > + * still keep the range to write as unwritten.
> >    *
> >    * The unwritten extents will be converted to written when DIO is completed.
> >    * For async direct IO, since the IO may still pending when return, we
> > @@ -3111,12 +3111,12 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
> >   	 * We could direct write to holes and fallocate.
> >   	 *
> >   	 * Allocated blocks to fill the hole are marked as
> > -	 * uninitialized to prevent parallel buffered read to expose
> > +	 * unwritten to prevent parallel buffered read to expose
> >   	 * the stale data before DIO complete the data IO.
> >   	 *
> >   	 * As to previously fallocated extents, ext4 get_block will
> >   	 * just simply mark the buffer mapped but still keep the
> > -	 * extents uninitialized.
> > +	 * extents unwritten.
> >   	 *
> >   	 * For non AIO case, we will convert those unwritten extents
> >   	 * to written after return back from blockdev_direct_IO.
> > diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
> > index 58ee7dc..1b809fe 100644
> > --- a/fs/ext4/move_extent.c
> > +++ b/fs/ext4/move_extent.c
> > @@ -57,8 +57,8 @@ get_ext_path(struct inode *inode, ext4_lblk_t lblock,
> >   static void
> >   copy_extent_status(struct ext4_extent *src, struct ext4_extent *dest)
> >   {
> > -	if (ext4_ext_is_uninitialized(src))
> > -		ext4_ext_mark_uninitialized(dest);
> > +	if (ext4_ext_is_unwritten(src))
> > +		ext4_ext_mark_unwritten(dest);
> >   	else
> >   		dest->ee_len = cpu_to_le16(ext4_ext_get_actual_len(dest));
> >   }
> > @@ -593,14 +593,14 @@ mext_calc_swap_extents(struct ext4_extent *tmp_dext,
> >    * @inode:		inode in question
> >    * @from:		block offset of inode
> >    * @count:		block count to be checked
> > - * @uninit:		extents expected to be uninitialized
> > + * @unwritten:		extents expected to be unwritten
> >    * @err:		pointer to save error value
> >    *
> >    * Return 1 if all extents in range has expected type, and zero otherwise.
> >    */
> >   static int
> >   mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
> > -			  int uninit, int *err)
> > +		    int unwritten, int *err)
> >   {
> >   	struct ext4_ext_path *path = NULL;
> >   	struct ext4_extent *ext;
> > @@ -611,7 +611,7 @@ mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
> >   		if (*err)
> >   			goto out;
> >   		ext = path[ext_depth(inode)].p_ext;
> > -		if (uninit != ext4_ext_is_uninitialized(ext))
> > +		if (unwritten != ext4_ext_is_unwritten(ext))
> >   			goto out;
> >   		from += ext4_ext_get_actual_len(ext);
> >   		ext4_ext_drop_refs(path);
> > @@ -894,7 +894,7 @@ out:
> >    * @orig_page_offset:		page index on original file
> >    * @data_offset_in_page:	block index where data swapping starts
> >    * @block_len_in_page:		the number of blocks to be swapped
> > - * @uninit:			orig extent is uninitialized or not
> > + * @unwritten:			orig extent is unwritten or not
> >    * @err:			pointer to save return value
> >    *
> >    * Save the data in original inode blocks and replace original inode extents
> > @@ -905,7 +905,7 @@ out:
> >   static int
> >   move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
> >   		  pgoff_t orig_page_offset, int data_offset_in_page,
> > -		  int block_len_in_page, int uninit, int *err)
> > +		  int block_len_in_page, int unwritten, int *err)
> >   {
> >   	struct inode *orig_inode = file_inode(o_filp);
> >   	struct page *pagep[2] = {NULL, NULL};
> > @@ -962,27 +962,27 @@ again:
> >   	if (unlikely(*err < 0))
> >   		goto stop_journal;
> >   	/*
> > -	 * If orig extent was uninitialized it can become initialized
> > +	 * If orig extent was unwritten it can become initialized
> >   	 * at any time after i_data_sem was dropped, in order to
> >   	 * serialize with delalloc we have recheck extent while we
> >   	 * hold page's lock, if it is still the case data copy is not
> >   	 * necessary, just swap data blocks between orig and donor.
> >   	 */
> > -	if (uninit) {
> > +	if (unwritten) {
> >   		ext4_double_down_write_data_sem(orig_inode, donor_inode);
> >   		/* If any of extents in range became initialized we have to
> >   		 * fallback to data copying */
> > -		uninit = mext_check_coverage(orig_inode, orig_blk_offset,
> > -					     block_len_in_page, 1, err);
> > +		unwritten = mext_check_coverage(orig_inode, orig_blk_offset,
> > +						block_len_in_page, 1, err);
> >   		if (*err)
> >   			goto drop_data_sem;
> > 
> > -		uninit &= mext_check_coverage(donor_inode, orig_blk_offset,
> > -					      block_len_in_page, 1, err);
> > +		unwritten &= mext_check_coverage(donor_inode, orig_blk_offset,
> > +						 block_len_in_page, 1, err);
> >   		if (*err)
> >   			goto drop_data_sem;
> > 
> > -		if (!uninit) {
> > +		if (!unwritten) {
> >   			ext4_double_up_write_data_sem(orig_inode, donor_inode);
> >   			goto data_copy;
> >   		}
> > @@ -1259,7 +1259,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
> >   	int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
> >   	int data_offset_in_page;
> >   	int block_len_in_page;
> > -	int uninit;
> > +	int unwritten;
> > 
> >   	if (orig_inode->i_sb != donor_inode->i_sb) {
> >   		ext4_debug("ext4 move extent: The argument files "
> > @@ -1391,8 +1391,8 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
> >   		    !last_extent)
> >   			continue;
> > 
> > -		/* Is original extent is uninitialized */
> > -		uninit = ext4_ext_is_uninitialized(ext_prev);
> > +		/* Is original extent is unwritten */
> > +		unwritten = ext4_ext_is_unwritten(ext_prev);
> > 
> >   		data_offset_in_page = seq_start % blocks_per_page;
> > 
> > @@ -1432,8 +1432,8 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
> >   						o_filp, donor_inode,
> >   						orig_page_offset,
> >   						data_offset_in_page,
> > -						block_len_in_page, uninit,
> > -						&ret);
> > +						block_len_in_page,
> > +						unwritten, &ret);
> > 
> >   			/* Count how many blocks we have exchanged */
> >   			*moved_len += block_len_in_page;
> > diff --git a/fs/ext4/super.c b/fs/ext4/super.c
> > index f3c6670..ff65117 100644
> > --- a/fs/ext4/super.c
> > +++ b/fs/ext4/super.c
> > @@ -3337,7 +3337,7 @@ static ext4_fsblk_t ext4_calculate_resv_clusters(struct super_block *sb)
> >   	 * By default we reserve 2% or 4096 clusters, whichever is smaller.
> >   	 * This should cover the situations where we can not afford to run
> >   	 * out of space like for example punch hole, or converting
> > -	 * uninitialized extents in delalloc path. In most cases such
> > +	 * unwritten extents in delalloc path. In most cases such
> >   	 * allocation would require 1, or 2 blocks, higher numbers are
> >   	 * very rare.
> >   	 */
> > diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
> > index be1c889..683cd75 100644
> > --- a/include/trace/events/ext4.h
> > +++ b/include/trace/events/ext4.h
> > @@ -45,7 +45,7 @@ struct extent_status;
> > 
> >   #define show_map_flags(flags) __print_flags(flags, "|",	\
> >   	{ EXT4_GET_BLOCKS_CREATE,		"CREATE" },		\
> > -	{ EXT4_GET_BLOCKS_UNINIT_EXT,		"UNINIT" },		\
> > +	{ EXT4_GET_BLOCKS_UNWRIT_EXT,		"UNWRIT" },		\
> >   	{ EXT4_GET_BLOCKS_DELALLOC_RESERVE,	"DELALLOC" },		\
> >   	{ EXT4_GET_BLOCKS_PRE_IO,		"PRE_IO" },		\
> >   	{ EXT4_GET_BLOCKS_CONVERT,		"CONVERT" },		\
> > @@ -1505,7 +1505,7 @@ DEFINE_EVENT(ext4__truncate, ext4_truncate_exit,
> >   	TP_ARGS(inode)
> >   );
> > 
> > -/* 'ux' is the uninitialized extent. */
> > +/* 'ux' is the unwritten extent. */
> >   TRACE_EVENT(ext4_ext_convert_to_initialized_enter,
> >   	TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
> >   		 struct ext4_extent *ux),
> > @@ -1541,7 +1541,7 @@ TRACE_EVENT(ext4_ext_convert_to_initialized_enter,
> >   );
> > 
> >   /*
> > - * 'ux' is the uninitialized extent.
> > + * 'ux' is the unwritten extent.
> >    * 'ix' is the initialized extent to which blocks are transferred.
> >    */
> >   TRACE_EVENT(ext4_ext_convert_to_initialized_fastpath,
> > @@ -1819,7 +1819,7 @@ DEFINE_EVENT(ext4__trim, ext4_trim_all_free,
> >   	TP_ARGS(sb, group, start, len)
> >   );
> > 
> > -TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
> > +TRACE_EVENT(ext4_ext_handle_unwritten_extents,
> >   	TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int flags,
> >   		 unsigned int allocated, ext4_fsblk_t newblock),
> > 
> > 
> 
> 
--
To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
