Message-Id: <20230825030956.1527023-2-yukuai1@huaweicloud.com>
Date: Fri, 25 Aug 2023 11:09:50 +0800
From: Yu Kuai <yukuai1@...weicloud.com>
To: agk@...hat.com, snitzer@...nel.org, dm-devel@...hat.com,
song@...nel.org, xni@...hat.com
Cc: linux-kernel@...r.kernel.org, linux-raid@...r.kernel.org,
yukuai3@...wei.com, yukuai1@...weicloud.com, yi.zhang@...wei.com,
yangerkun@...wei.com
Subject: [PATCH -next v2 1/7] md: initialize 'active_io' while allocating mddev
From: Yu Kuai <yukuai3@...wei.com>
'active_io' is used for mddev_suspend() and is currently initialized in
md_run(), which means 'reconfig_mutex' must be held and "mddev->pers"
must be set before mddev_suspend() can be called.

Initialize 'active_io' early so that mddev_suspend() is safe to call as
soon as the mddev is allocated; this will be helpful for refactoring
mddev_suspend() in the following patches.
Signed-off-by: Yu Kuai <yukuai3@...wei.com>
---
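For context (not part of this patch): a simplified sketch of how
mddev_suspend()/mddev_resume() rely on 'active_io'. Field and function
names follow drivers/md/md.c, but the bodies below are illustrative
only; the real implementations also handle locking, nesting and quiesce
callbacks.

	/*
	 * Illustrative sketch only: suspend stops new I/O from taking a
	 * reference on 'active_io' and waits for in-flight I/O to drain;
	 * active_io_release() wakes sb_wait when the last reference drops.
	 */
	void mddev_suspend(struct mddev *mddev)
	{
		/* Refuse new references; pending ones keep the ref alive. */
		percpu_ref_kill(&mddev->active_io);

		/* Wait for all in-flight I/O to complete. */
		wait_event(mddev->sb_wait,
			   percpu_ref_is_zero(&mddev->active_io));
	}

	void mddev_resume(struct mddev *mddev)
	{
		/*
		 * Re-arm the reference; this is why percpu_ref_init() uses
		 * PERCPU_REF_ALLOW_REINIT.
		 */
		percpu_ref_resurrect(&mddev->active_io);
		wake_up(&mddev->sb_wait);
	}

With 'active_io' initialized at allocation time, this pattern no longer
depends on md_run() having been called first.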
drivers/md/dm-raid.c | 7 +++++-
drivers/md/md.c | 53 +++++++++++++++++++++++++++-----------------
drivers/md/md.h | 3 ++-
3 files changed, 41 insertions(+), 22 deletions(-)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 1f22bef27841..020598c10db0 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -749,7 +749,11 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
return ERR_PTR(-ENOMEM);
}
- mddev_init(&rs->md);
+ if (mddev_init(&rs->md)) {
+ kfree(rs);
+ ti->error = "Cannot initialize raid context";
+ return ERR_PTR(-ENOMEM);
+ }
rs->raid_disks = raid_devs;
rs->delta_disks = 0;
@@ -798,6 +802,7 @@ static void raid_set_free(struct raid_set *rs)
dm_put_device(rs->ti, rs->dev[i].data_dev);
}
+ mddev_destroy(&rs->md);
kfree(rs);
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4e5bca33f559..9974cf45f9d0 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -639,8 +639,20 @@ void mddev_put(struct mddev *mddev)
static void md_safemode_timeout(struct timer_list *t);
static void md_start_sync(struct work_struct *ws);
-void mddev_init(struct mddev *mddev)
+static void active_io_release(struct percpu_ref *ref)
{
+ struct mddev *mddev = container_of(ref, struct mddev, active_io);
+
+ wake_up(&mddev->sb_wait);
+}
+
+int mddev_init(struct mddev *mddev)
+{
+
+ if (percpu_ref_init(&mddev->active_io, active_io_release,
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
+ return -ENOMEM;
+
mutex_init(&mddev->open_mutex);
mutex_init(&mddev->reconfig_mutex);
mutex_init(&mddev->sync_mutex);
@@ -665,9 +677,17 @@ void mddev_init(struct mddev *mddev)
INIT_WORK(&mddev->sync_work, md_start_sync);
INIT_WORK(&mddev->del_work, mddev_delayed_delete);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mddev_init);
+void mddev_destroy(struct mddev *mddev)
+{
+ percpu_ref_exit(&mddev->active_io);
+}
+EXPORT_SYMBOL_GPL(mddev_destroy);
+
static struct mddev *mddev_find_locked(dev_t unit)
{
struct mddev *mddev;
@@ -711,13 +731,16 @@ static struct mddev *mddev_alloc(dev_t unit)
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return ERR_PTR(-ENOMEM);
- mddev_init(new);
+
+ error = mddev_init(new);
+ if (error)
+ goto out_free_new;
spin_lock(&all_mddevs_lock);
if (unit) {
error = -EEXIST;
if (mddev_find_locked(unit))
- goto out_free_new;
+ goto out_destroy_new;
new->unit = unit;
if (MAJOR(unit) == MD_MAJOR)
new->md_minor = MINOR(unit);
@@ -728,7 +751,7 @@ static struct mddev *mddev_alloc(dev_t unit)
error = -ENODEV;
new->unit = mddev_alloc_unit();
if (!new->unit)
- goto out_free_new;
+ goto out_destroy_new;
new->md_minor = MINOR(new->unit);
new->hold_active = UNTIL_STOP;
}
@@ -736,8 +759,11 @@ static struct mddev *mddev_alloc(dev_t unit)
list_add(&new->all_mddevs, &all_mddevs);
spin_unlock(&all_mddevs_lock);
return new;
-out_free_new:
+
+out_destroy_new:
spin_unlock(&all_mddevs_lock);
+ mddev_destroy(new);
+out_free_new:
kfree(new);
return ERR_PTR(error);
}
@@ -748,6 +774,7 @@ static void mddev_free(struct mddev *mddev)
list_del(&mddev->all_mddevs);
spin_unlock(&all_mddevs_lock);
+ mddev_destroy(mddev);
kfree(mddev);
}
@@ -5778,12 +5805,6 @@ static void md_safemode_timeout(struct timer_list *t)
}
static int start_dirty_degraded;
-static void active_io_release(struct percpu_ref *ref)
-{
- struct mddev *mddev = container_of(ref, struct mddev, active_io);
-
- wake_up(&mddev->sb_wait);
-}
int md_run(struct mddev *mddev)
{
@@ -5864,15 +5885,10 @@ int md_run(struct mddev *mddev)
nowait = nowait && bdev_nowait(rdev->bdev);
}
- err = percpu_ref_init(&mddev->active_io, active_io_release,
- PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
- if (err)
- return err;
-
if (!bioset_initialized(&mddev->bio_set)) {
err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
if (err)
- goto exit_active_io;
+ return err;
}
if (!bioset_initialized(&mddev->sync_set)) {
err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
@@ -6069,8 +6085,6 @@ int md_run(struct mddev *mddev)
bioset_exit(&mddev->sync_set);
exit_bio_set:
bioset_exit(&mddev->bio_set);
-exit_active_io:
- percpu_ref_exit(&mddev->active_io);
return err;
}
EXPORT_SYMBOL_GPL(md_run);
@@ -6286,7 +6300,6 @@ static void __md_stop(struct mddev *mddev)
module_put(pers->owner);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- percpu_ref_exit(&mddev->active_io);
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
bioset_exit(&mddev->io_clone_set);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 64d05cb65287..f6b64b0e92c7 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -795,7 +795,8 @@ extern int md_integrity_register(struct mddev *mddev);
extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
-extern void mddev_init(struct mddev *mddev);
+extern int mddev_init(struct mddev *mddev);
+extern void mddev_destroy(struct mddev *mddev);
struct mddev *md_alloc(dev_t dev, char *name);
void mddev_put(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
--
2.39.2