Date:   Mon, 31 Jul 2023 09:26:26 +0800
From:   Wu Bo <bo.wu@...o.com>
To:     Jaegeuk Kim <jaegeuk@...nel.org>, Chao Yu <chao@...nel.org>
Cc:     linux-f2fs-devel@...ts.sourceforge.net,
        linux-kernel@...r.kernel.org, wubo.oduw@...il.com,
        Wu Bo <bo.wu@...o.com>
Subject: [PATCH 1/1] f2fs: move fiemap to use iomap framework

Convert f2fs_fiemap() to use the generic iomap_fiemap() helper, adding
f2fs_iomap_report_ops to report mappings (including inline and
compressed data) in place of the open-coded extent-filling loop.

Tested with xfstests by running 'kvm-xfstests -c f2fs -g auto' both
with and without this patch; no regressions were observed.

Some tests fail both before and after the change. The test results are:
f2fs/default: 683 tests, 9 failures, 226 skipped, 30297 seconds
  Failures: generic/050 generic/064 generic/250 generic/252 generic/459
      generic/506 generic/563 generic/634 generic/635

Signed-off-by: Wu Bo <bo.wu@...o.com>
---
 fs/f2fs/data.c   | 238 ++++++++++++++++++++---------------------------
 fs/f2fs/f2fs.h   |   8 +-
 fs/f2fs/inline.c |  20 ++--
 3 files changed, 120 insertions(+), 146 deletions(-)

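As an additional sanity check independent of xfstests, the extents
reported through the converted path can be inspected from userspace
with the FS_IOC_FIEMAP ioctl (or 'filefrag -v'). The program below is
illustrative only and not part of the patch; the file path is a
placeholder:

/* Illustrative only: dump the FIEMAP extents of a file. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
	/* Placeholder path; pass a file on the f2fs mount under test. */
	const char *path = argc > 1 ? argv[1] : "/mnt/f2fs/testfile";
	unsigned int max_extents = 32;
	struct fiemap *fm;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	fm = calloc(1, sizeof(*fm) +
		       max_extents * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;

	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;	/* map the whole file */
	fm->fm_extent_count = max_extents;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("FS_IOC_FIEMAP");
		return 1;
	}

	for (unsigned int i = 0; i < fm->fm_mapped_extents; i++) {
		struct fiemap_extent *fe = &fm->fm_extents[i];

		printf("logical=%llu phys=%llu len=%llu flags=0x%x%s\n",
		       (unsigned long long)fe->fe_logical,
		       (unsigned long long)fe->fe_physical,
		       (unsigned long long)fe->fe_length,
		       fe->fe_flags,
		       (fe->fe_flags & FIEMAP_EXTENT_LAST) ? " (last)" : "");
	}

	free(fm);
	close(fd);
	return 0;
}
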
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 5882afe71d82..2d0be051a875 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1599,12 +1599,14 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
 	unsigned int maxblocks = map->m_len;
 	struct dnode_of_data dn;
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
+	unsigned int cluster_mask = cluster_size - 1;
 	int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
 	pgoff_t pgofs, end_offset, end;
-	int err = 0, ofs = 1;
-	unsigned int ofs_in_node, last_ofs_in_node;
+	int err = 0, ofs = 1, append = 0;
+	unsigned int ofs_in_node, last_ofs_in_node, ofs_in_cluster;
 	blkcnt_t prealloc;
-	block_t blkaddr;
+	block_t blkaddr, start_addr;
 	unsigned int start_pgofs;
 	int bidx = 0;
 	bool is_hole;
@@ -1691,6 +1693,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
 			map->m_flags |= F2FS_MAP_NEW;
 	} else if (is_hole) {
 		if (f2fs_compressed_file(inode) &&
+		    blkaddr == COMPRESS_ADDR &&
 		    f2fs_sanity_check_cluster(&dn) &&
 		    (flag != F2FS_GET_BLOCK_FIEMAP ||
 		     IS_ENABLED(CONFIG_F2FS_CHECK_FS))) {
@@ -1712,6 +1715,18 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
 					*map->m_next_pgofs = pgofs + 1;
 				goto sync_out;
 			}
+			if (f2fs_compressed_file(inode) &&
+			    blkaddr == COMPRESS_ADDR) {
+				/* split consecutive cluster */
+				if (map->m_len) {
+					dn.ofs_in_node--;
+					goto sync_out;
+				}
+				pgofs++;
+				dn.ofs_in_node++;
+				append = 1;
+				goto next_block;
+			}
 			break;
 		default:
 			/* for defragment case */
@@ -1750,6 +1765,10 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
 		goto sync_out;
 	}
 
+	/* one cluster maps to one extent; split consecutive clusters */
+	if (append && !((dn.ofs_in_node + 1) & cluster_mask))
+		goto sync_out;
+
 skip:
 	dn.ofs_in_node++;
 	pgofs++;
@@ -1832,6 +1851,20 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
 		if (map->m_next_extent)
 			*map->m_next_extent = pgofs + 1;
 	}
+
+	if (flag == F2FS_GET_BLOCK_FIEMAP && f2fs_compressed_file(inode)) {
+		ofs_in_node = round_down(dn.ofs_in_node, cluster_size);
+		ofs_in_cluster = dn.ofs_in_node & cluster_mask;
+		start_addr = data_blkaddr(dn.inode, dn.node_page, ofs_in_node);
+		if (start_addr == COMPRESS_ADDR) {
+			map->m_flags |= F2FS_MAP_ENCODED;
+			map->m_len += append;
+			/* End of a cluster */
+			if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR)
+				map->m_len += cluster_size - ofs_in_cluster;
+		}
+	}
+
 	f2fs_put_dnode(&dn);
 unlock_out:
 	if (map->m_may_create) {
@@ -1952,37 +1985,10 @@ static int f2fs_xattr_fiemap(struct inode *inode,
 	return (err < 0 ? err : 0);
 }
 
-static loff_t max_inode_blocks(struct inode *inode)
-{
-	loff_t result = ADDRS_PER_INODE(inode);
-	loff_t leaf_count = ADDRS_PER_BLOCK(inode);
-
-	/* two direct node blocks */
-	result += (leaf_count * 2);
-
-	/* two indirect node blocks */
-	leaf_count *= NIDS_PER_BLOCK;
-	result += (leaf_count * 2);
-
-	/* one double indirect node block */
-	leaf_count *= NIDS_PER_BLOCK;
-	result += leaf_count;
-
-	return result;
-}
-
 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		u64 start, u64 len)
 {
-	struct f2fs_map_blocks map;
-	sector_t start_blk, last_blk;
-	pgoff_t next_pgofs;
-	u64 logical = 0, phys = 0, size = 0;
-	u32 flags = 0;
-	int ret = 0;
-	bool compr_cluster = false, compr_appended;
-	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
-	unsigned int count_in_cluster = 0;
+	int ret;
 	loff_t maxbytes;
 
 	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
@@ -1991,10 +1997,6 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 			return ret;
 	}
 
-	ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
-	if (ret)
-		return ret;
-
 	inode_lock(inode);
 
 	maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
@@ -2011,110 +2013,9 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		goto out;
 	}
 
-	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
-		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
-		if (ret != -EAGAIN)
-			goto out;
-	}
-
-	if (bytes_to_blks(inode, len) == 0)
-		len = blks_to_bytes(inode, 1);
-
-	start_blk = bytes_to_blks(inode, start);
-	last_blk = bytes_to_blks(inode, start + len - 1);
-
-next:
-	memset(&map, 0, sizeof(map));
-	map.m_lblk = start_blk;
-	map.m_len = bytes_to_blks(inode, len);
-	map.m_next_pgofs = &next_pgofs;
-	map.m_seg_type = NO_CHECK_TYPE;
-
-	if (compr_cluster) {
-		map.m_lblk += 1;
-		map.m_len = cluster_size - count_in_cluster;
-	}
-
-	ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
-	if (ret)
-		goto out;
-
-	/* HOLE */
-	if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
-		start_blk = next_pgofs;
-
-		if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
-						max_inode_blocks(inode)))
-			goto prep_next;
-
-		flags |= FIEMAP_EXTENT_LAST;
-	}
-
-	compr_appended = false;
-	/* In a case of compressed cluster, append this to the last extent */
-	if (compr_cluster && ((map.m_flags & F2FS_MAP_DELALLOC) ||
-			!(map.m_flags & F2FS_MAP_FLAGS))) {
-		compr_appended = true;
-		goto skip_fill;
-	}
-
-	if (size) {
-		flags |= FIEMAP_EXTENT_MERGED;
-		if (IS_ENCRYPTED(inode))
-			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
-
-		ret = fiemap_fill_next_extent(fieinfo, logical,
-				phys, size, flags);
-		trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
-		if (ret)
-			goto out;
-		size = 0;
-	}
-
-	if (start_blk > last_blk)
-		goto out;
-
-skip_fill:
-	if (map.m_pblk == COMPRESS_ADDR) {
-		compr_cluster = true;
-		count_in_cluster = 1;
-	} else if (compr_appended) {
-		unsigned int appended_blks = cluster_size -
-						count_in_cluster + 1;
-		size += blks_to_bytes(inode, appended_blks);
-		start_blk += appended_blks;
-		compr_cluster = false;
-	} else {
-		logical = blks_to_bytes(inode, start_blk);
-		phys = __is_valid_data_blkaddr(map.m_pblk) ?
-			blks_to_bytes(inode, map.m_pblk) : 0;
-		size = blks_to_bytes(inode, map.m_len);
-		flags = 0;
-
-		if (compr_cluster) {
-			flags = FIEMAP_EXTENT_ENCODED;
-			count_in_cluster += map.m_len;
-			if (count_in_cluster == cluster_size) {
-				compr_cluster = false;
-				size += blks_to_bytes(inode, 1);
-			}
-		} else if (map.m_flags & F2FS_MAP_DELALLOC) {
-			flags = FIEMAP_EXTENT_UNWRITTEN;
-		}
-
-		start_blk += bytes_to_blks(inode, size);
-	}
+	ret = iomap_fiemap(inode, fieinfo, start, len, &f2fs_iomap_report_ops);
 
-prep_next:
-	cond_resched();
-	if (fatal_signal_pending(current))
-		ret = -EINTR;
-	else
-		goto next;
 out:
-	if (ret == 1)
-		ret = 0;
-
 	inode_unlock(inode);
 	return ret;
 }
@@ -4266,3 +4167,66 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 const struct iomap_ops f2fs_iomap_ops = {
 	.iomap_begin	= f2fs_iomap_begin,
 };
+
+static int f2fs_iomap_begin_report(struct inode *inode, loff_t offset,
+				   loff_t length, unsigned int flags,
+				   struct iomap *iomap, struct iomap *srcmap)
+{
+	struct f2fs_map_blocks map = {0};
+	pgoff_t next_pgofs = 0;
+	int err;
+
+	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
+		err = f2fs_inline_data_fiemap(inode, iomap, offset, length);
+		if (err != -EAGAIN)
+			return err;
+	}
+
+	map.m_lblk = bytes_to_blks(inode, offset);
+	map.m_len = bytes_to_blks(inode, offset + length - 1) - map.m_lblk + 1;
+	map.m_next_pgofs = &next_pgofs;
+	map.m_seg_type = NO_CHECK_TYPE;
+	err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
+	if (err)
+		return err;
+	/*
+	 * When inline encryption is enabled, sometimes I/O to an encrypted file
+	 * has to be broken up to guarantee DUN contiguity.  Handle this by
+	 * limiting the length of the mapping returned.
+	 */
+	map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
+
+	if (WARN_ON_ONCE(map.m_pblk == COMPRESS_ADDR))
+		return -EINVAL;
+
+	iomap->offset = blks_to_bytes(inode, map.m_lblk);
+	if (map.m_flags & F2FS_MAP_FLAGS)
+		iomap->length = blks_to_bytes(inode, map.m_len);
+	else
+		iomap->length = blks_to_bytes(inode, next_pgofs) -
+				iomap->offset;
+
+	if (map.m_pblk == NEW_ADDR) {
+		/* f2fs treats pre-alloc & delay-alloc blocks the same way */
+		iomap->type = IOMAP_UNWRITTEN;
+		iomap->addr = IOMAP_NULL_ADDR;
+	} else if (map.m_pblk == NULL_ADDR) {
+		iomap->type = IOMAP_HOLE;
+		iomap->addr = IOMAP_NULL_ADDR;
+	} else {
+		iomap->type = IOMAP_MAPPED;
+		iomap->flags |= IOMAP_F_MERGED;
+		iomap->bdev = map.m_bdev;
+		iomap->addr = blks_to_bytes(inode, map.m_pblk);
+	}
+
+	cond_resched();
+	if (fatal_signal_pending(current))
+		return -EINTR;
+	else
+		return 0;
+}
+
+const struct iomap_ops f2fs_iomap_report_ops = {
+	.iomap_begin	= f2fs_iomap_begin_report,
+};
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index c7cb2177b252..64a2bf58bd67 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -25,6 +25,7 @@
 #include <linux/quotaops.h>
 #include <linux/part_stat.h>
 #include <crypto/hash.h>
+#include <linux/iomap.h>
 
 #include <linux/fscrypt.h>
 #include <linux/fsverity.h>
@@ -680,8 +681,9 @@ struct extent_tree_info {
 #define F2FS_MAP_NEW		(1U << 0)
 #define F2FS_MAP_MAPPED		(1U << 1)
 #define F2FS_MAP_DELALLOC	(1U << 2)
+#define F2FS_MAP_ENCODED	(1U << 3)
 #define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
-				F2FS_MAP_DELALLOC)
+				F2FS_MAP_DELALLOC | F2FS_MAP_ENCODED)
 
 struct f2fs_map_blocks {
 	struct block_device *m_bdev;	/* for multi-device dio */
@@ -4109,6 +4111,7 @@ extern const struct inode_operations f2fs_symlink_inode_operations;
 extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
 extern const struct inode_operations f2fs_special_inode_operations;
 extern struct kmem_cache *f2fs_inode_entry_slab;
+extern const struct iomap_ops f2fs_iomap_report_ops;
 
 /*
  * inline.c
@@ -4139,8 +4142,7 @@ bool f2fs_empty_inline_dir(struct inode *dir);
 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
 			struct fscrypt_str *fstr);
 int f2fs_inline_data_fiemap(struct inode *inode,
-			struct fiemap_extent_info *fieinfo,
-			__u64 start, __u64 len);
+		struct iomap *iomap, __u64 start, __u64 len);
 
 /*
  * shrinker.c
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 4638fee16a91..c1afc3414231 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -767,11 +767,9 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
 }
 
 int f2fs_inline_data_fiemap(struct inode *inode,
-		struct fiemap_extent_info *fieinfo, __u64 start, __u64 len)
+		struct iomap *iomap, __u64 start, __u64 len)
 {
 	__u64 byteaddr, ilen;
-	__u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
-		FIEMAP_EXTENT_LAST;
 	struct node_info ni;
 	struct page *ipage;
 	int err = 0;
@@ -792,8 +790,14 @@ int f2fs_inline_data_fiemap(struct inode *inode,
 	}
 
 	ilen = min_t(size_t, MAX_INLINE_DATA(inode), i_size_read(inode));
-	if (start >= ilen)
+	if (start >= ilen) {
+		/* stop iomap iterator */
+		iomap->offset = start;
+		iomap->length = len;
+		iomap->addr = IOMAP_NULL_ADDR;
+		iomap->type = IOMAP_HOLE;
 		goto out;
+	}
 	if (start + len < ilen)
 		ilen = start + len;
 	ilen -= start;
@@ -805,8 +809,12 @@ int f2fs_inline_data_fiemap(struct inode *inode,
 	byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
 	byteaddr += (char *)inline_data_addr(inode, ipage) -
 					(char *)F2FS_INODE(ipage);
-	err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags);
-	trace_f2fs_fiemap(inode, start, byteaddr, ilen, flags, err);
+	iomap->addr = byteaddr;
+	iomap->type = IOMAP_INLINE;
+	iomap->flags = 0;
+	iomap->offset = start;
+	iomap->length = ilen;
+
 out:
 	f2fs_put_page(ipage, 1);
 	return err;
-- 
2.35.3
