Message-ID: <20241223081044.1126291-5-yi.sun@unisoc.com>
Date: Mon, 23 Dec 2024 16:10:44 +0800
From: Yi Sun <yi.sun@...soc.com>
To: <chao@...nel.org>, <jaegeuk@...nel.org>
CC: <sunyibuaa@...il.com>, <yi.sun@...soc.com>,
<linux-f2fs-devel@...ts.sourceforge.net>,
<linux-kernel@...r.kernel.org>, <niuzhiguo84@...il.com>,
<Hao_hao.Wang@...soc.com>, <ke.wang@...soc.com>
Subject: [PATCH v4 4/4] f2fs: Optimize f2fs_truncate_data_blocks_range()

f2fs_invalidate_blocks() can now invalidate a range of
consecutive blocks in a single call, so rework
f2fs_truncate_data_blocks_range() to accumulate runs of
consecutive block addresses and invalidate each run at once
instead of calling f2fs_invalidate_blocks() once per block.
Signed-off-by: Yi Sun <yi.sun@...soc.com>
---
fs/f2fs/file.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 68 insertions(+), 4 deletions(-)
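
For reviewers, here is a minimal userspace sketch of the
accumulate-and-flush pattern this patch applies. It is an
illustration only, not part of the patch: the sample address array,
the invalidate_range() stub standing in for f2fs_invalidate_blocks(),
and the other names are hypothetical. Consecutive addresses are
merged into one run and invalidated with a single call; a hole
(NULL_ADDR) or a discontinuity flushes the pending run first.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef uint32_t block_t;
#define NULL_ADDR ((block_t)0)

/* Stub standing in for f2fs_invalidate_blocks(sbi, start, len). */
static void invalidate_range(block_t start, block_t len)
{
	printf("invalidate %u block(s) starting at %u\n",
	       (unsigned)len, (unsigned)start);
}

int main(void)
{
	/* Hypothetical per-dnode block addresses; 0 marks a hole. */
	block_t addrs[] = { 100, 101, 102, 0, 200, 201, 300 };
	int count = sizeof(addrs) / sizeof(addrs[0]);
	block_t start = 0, end = 0;
	bool pending = false;

	for (int i = 0; i < count; i++) {
		block_t blkaddr = addrs[i];

		if (blkaddr == NULL_ADDR) {
			/* A hole ends the run: flush what was recorded. */
			if (pending)
				invalidate_range(start, end - start + 1);
			pending = false;
			continue;
		}

		if (pending && blkaddr - end == 1) {
			/* Consecutive with the pending run: extend it. */
			end = blkaddr;
			continue;
		}

		/* Not consecutive: flush the old run, start a new one. */
		if (pending)
			invalidate_range(start, end - start + 1);
		start = end = blkaddr;
		pending = true;
	}

	/* Flush whatever is still pending after the loop. */
	if (pending)
		invalidate_range(start, end - start + 1);
	return 0;
}

With the sample array, seven per-block invalidations collapse into
three ranged calls, which is the point of the optimization.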
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 81764b10840b..9980d17ef9f5 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -612,6 +612,15 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
return finish_preallocate_blocks(inode);
}
+static bool check_curr_block_is_consecutive(struct f2fs_sb_info *sbi,
+ block_t curr, block_t end)
+{
+ if (curr - end == 1 || curr == end)
+ return true;
+ else
+ return false;
+}
+
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
@@ -621,8 +630,27 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
int cluster_index = 0, valid_blocks = 0;
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
+ /*
+ * Temporarily record the consecutive block range.
+ * While the current @blkaddr and @blkaddr_end can be processed
+ * together, keep extending the range by updating @blkaddr_end.
+ * Once the current @blkaddr turns out not to be consecutive
+ * with @blkaddr_end, the recorded consecutive block range
+ * [blkaddr_start, blkaddr_end] must be processed.
+ */
+ block_t blkaddr_start, blkaddr_end;
+ /*
+ * @blkaddr_start and @blkaddr_end may be assigned NULL_ADDR or
+ * other invalid block addresses, which must not be passed to
+ * f2fs_invalidate_blocks(), so @last_valid records whether the
+ * currently recorded range contains valid blocks.
+ */
+ bool last_valid = false;
+ /* Whether the last @blkaddr needs to be invalidated separately. */
+ bool last_one = true;
addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;
+ blkaddr_start = blkaddr_end = le32_to_cpu(*addr);
/* Assumption: truncation starts with cluster */
for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
@@ -638,24 +666,60 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
}
if (blkaddr == NULL_ADDR)
- continue;
+ goto next;
f2fs_set_data_blkaddr(dn, NULL_ADDR);
if (__is_valid_data_blkaddr(blkaddr)) {
if (time_to_inject(sbi, FAULT_BLKADDR_CONSISTENCE))
- continue;
+ goto next;
if (!f2fs_is_valid_blkaddr_raw(sbi, blkaddr,
DATA_GENERIC_ENHANCE))
- continue;
+ goto next;
if (compressed_cluster)
valid_blocks++;
}
- f2fs_invalidate_blocks(sbi, blkaddr, 1);
+
+ if (check_curr_block_is_consecutive(sbi, blkaddr, blkaddr_end)) {
+ /*
+ * The current block @blkaddr is consecutive with
+ * @blkaddr_end, so update @blkaddr_end and skip
+ * f2fs_invalidate_blocks() until a @blkaddr that
+ * cannot be merged into the current range is
+ * encountered.
+ */
+ blkaddr_end = blkaddr;
+ if (count == 1)
+ last_one = false;
+ else
+ goto skip_invalid;
+ }
+
+ f2fs_invalidate_blocks(sbi, blkaddr_start,
+ blkaddr_end - blkaddr_start + 1);
+ blkaddr_start = blkaddr_end = blkaddr;
+
+ if (count == 1 && last_one)
+ f2fs_invalidate_blocks(sbi, blkaddr, 1);
+
+skip_invalid:
+ last_valid = true;
if (!released || blkaddr != COMPRESS_ADDR)
nr_free++;
+
+ continue;
+
+next:
+ /* If a consecutive block range has been recorded, process it now. */
+ if (last_valid)
+ f2fs_invalidate_blocks(sbi, blkaddr_start,
+ blkaddr_end - blkaddr_start + 1);
+
+ blkaddr_start = blkaddr_end = le32_to_cpu(*(addr + 1));
+ last_valid = false;
+
}
if (compressed_cluster)
--
2.25.1
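
A note on the two cases accepted by check_curr_block_is_consecutive(),
again as an illustrative sketch rather than code from the patch:
curr == end matches the very first address, which the patch pre-loads
into both @blkaddr_start and @blkaddr_end before the loop, while
curr - end == 1 extends an existing run by one block. Since block_t
is a 32-bit unsigned type in f2fs, a backwards address cannot wrap
into a false match:

#include <assert.h>
#include <stdbool.h>

typedef unsigned int block_t;

/* Standalone copy of the predicate used by the patch. */
static bool is_consecutive(block_t curr, block_t end)
{
	return curr - end == 1 || curr == end;
}

int main(void)
{
	assert(is_consecutive(100, 100));	/* first block, pre-loaded equal */
	assert(is_consecutive(101, 100));	/* extends the run by one */
	assert(!is_consecutive(103, 101));	/* gap of two: flush the range */
	assert(!is_consecutive(99, 100));	/* unsigned: 99 - 100 != 1 */
	return 0;
}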