Message-Id: <48d152d713a9af21eefbfa69b26a7dd417f0897c.1667822611.git.ritesh.list@gmail.com>
Date: Mon, 7 Nov 2022 17:51:47 +0530
From: "Ritesh Harjani (IBM)" <ritesh.list@...il.com>
To: Theodore Ts'o <tytso@....edu>
Cc: linux-ext4@...r.kernel.org,
Harshad Shirwadkar <harshadshirwadkar@...il.com>,
Wang Shilong <wshilong@....com>,
Andreas Dilger <adilger.kernel@...ger.ca>,
Li Xi <lixi@....com>, Ritesh Harjani <ritesh.list@...il.com>
Subject: [RFCv1 59/72] e2fsck: update mmp block in one thread
From: Wang Shilong <wshilong@....com>
With multiple e2fsck threads, more than one thread may try to
update the MMP (multi-mount protection) block at the same time.
Allow only one thread at a time to own the MMP update.
Signed-off-by: Wang Shilong <wshilong@....com>
Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@...il.com>
---
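Note: as an illustration of the locking scheme in the hunk below, here
is a minimal standalone sketch (not e2fsck code; worker(), map_lock and
the demo main() are invented for the example, while mmp_update_thread
and check_mmp mirror the patch). The first thread to find the slot
unclaimed re-checks under the write lock and records its 1-based index;
every other thread sees a nonzero owner and skips the MMP update:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned int mmp_update_thread;	/* 0 == unclaimed, else index + 1 */

static void *worker(void *arg)
{
	unsigned int idx = (unsigned int)(unsigned long)arg;
	int check_mmp = 0;

	/* Cheap unlocked peek; only take the write lock when unclaimed. */
	if (!mmp_update_thread) {
		pthread_rwlock_wrlock(&map_lock);
		/* Re-check under the lock: another thread may have won. */
		if (!mmp_update_thread) {
			mmp_update_thread = idx + 1;
			check_mmp = 1;
		}
		pthread_rwlock_unlock(&map_lock);
	}

	/* Non-owners verify ownership under a read lock and skip it. */
	pthread_rwlock_rdlock(&map_lock);
	if (mmp_update_thread == idx + 1)
		check_mmp = 1;
	pthread_rwlock_unlock(&map_lock);

	if (check_mmp)
		printf("thread %u would call e2fsck_mmp_update()\n", idx);

	/* The owner drops its claim on exit, as pass 1 does at endit,
	 * so a still-running thread can take over MMP updates. */
	pthread_rwlock_wrlock(&map_lock);
	if (check_mmp)
		mmp_update_thread = 0;
	pthread_rwlock_unlock(&map_lock);
	return NULL;
}

int main(void)
{
	pthread_t tid[4];
	unsigned long i;

	for (i = 0; i < 4; i++)
		pthread_create(&tid[i], NULL, worker, (void *)i);
	for (i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

Build with e.g. cc -pthread demo.c. The actual patch reuses the
existing pass 1 block-map rwlock helpers for this rather than adding a
dedicated lock.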
e2fsck/e2fsck.h | 1 +
e2fsck/pass1.c | 34 ++++++++++++++++++++++++++++++++--
2 files changed, 33 insertions(+), 2 deletions(-)
diff --git a/e2fsck/e2fsck.h b/e2fsck/e2fsck.h
index 01bd9d01..2dd7ba27 100644
--- a/e2fsck/e2fsck.h
+++ b/e2fsck/e2fsck.h
@@ -488,6 +488,7 @@ struct e2fsck_struct {
#ifdef HAVE_PTHREAD
__u32 fs_num_threads;
+ __u32 mmp_update_thread;
int fs_need_locking;
/* serialize fix operation for multiple threads */
pthread_rwlock_t fs_fix_rwlock;
diff --git a/e2fsck/pass1.c b/e2fsck/pass1.c
index 93cff80e..ed4275c3 100644
--- a/e2fsck/pass1.c
+++ b/e2fsck/pass1.c
@@ -1509,7 +1509,8 @@ void e2fsck_pass1_run(e2fsck_t ctx)
dgrp_t ra_group = 0;
struct ea_quota ea_ibody_quota;
struct process_inode_block *inodes_to_process;
- int process_inode_count;
+ int process_inode_count, check_mmp;
+ e2fsck_t global_ctx = ctx->global_ctx ? ctx->global_ctx : ctx;
init_resource_track(&rtrack, ctx->fs->io);
clear_problem_context(&pctx);
@@ -1672,8 +1673,30 @@ void e2fsck_pass1_run(e2fsck_t ctx)
#endif
while (1) {
+ check_mmp = 0;
e2fsck_pass1_check_lock(ctx);
- if (ino % (fs->super->s_inodes_per_group * 4) == 1) {
+#ifdef HAVE_PTHREAD
+ if (!global_ctx->mmp_update_thread) {
+ e2fsck_pass1_block_map_w_lock(ctx);
+ if (!global_ctx->mmp_update_thread) {
+ global_ctx->mmp_update_thread =
+ ctx->thread_info.et_thread_index + 1;
+ check_mmp = 1;
+ }
+ e2fsck_pass1_block_map_w_unlock(ctx);
+ }
+
+ /* Only one active thread may update the mmp block. */
+ e2fsck_pass1_block_map_r_lock(ctx);
+ if (global_ctx->mmp_update_thread ==
+ ctx->thread_info.et_thread_index + 1)
+ check_mmp = 1;
+ e2fsck_pass1_block_map_r_unlock(ctx);
+#else
+ check_mmp = 1;
+#endif
+
+ if (check_mmp && (ino % (fs->super->s_inodes_per_group * 4) == 1)) {
if (e2fsck_mmp_update(fs))
fatal_error(ctx, 0);
}
@@ -2437,6 +2460,13 @@ endit:
print_resource_track(ctx, _("Pass 1"), &rtrack, ctx->fs->io);
else
ctx->invalid_bitmaps++;
+#ifdef HAVE_PTHREAD
+ /* reset mmp_update_thread after this thread exits */
+ e2fsck_pass1_block_map_w_lock(ctx);
+ if (check_mmp)
+ global_ctx->mmp_update_thread = 0;
+ e2fsck_pass1_block_map_w_unlock(ctx);
+#endif
}
#ifdef HAVE_PTHREAD
--
2.37.3