Message-Id: <20250304121918.3159388-3-linan666@huaweicloud.com>
Date: Tue, 4 Mar 2025 20:19:16 +0800
From: linan666@...weicloud.com
To: axboe@...nel.dk,
song@...nel.org,
yukuai3@...wei.com,
hare@...e.de,
martin.petersen@...cle.com
Cc: linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-raid@...r.kernel.org,
linan666@...weicloud.com,
yangerkun@...wei.com,
zhangxiaoxu5@...wei.com,
wanghai38@...wei.com
Subject: [PATCH 2/4] md: make raid logical_block_size configurable
From: Li Nan <linan122@...wei.com>

Previously, a raid array used the maximum logical_block_size (LBS) of
all member disks. Adding a disk with a larger LBS while the array was
running could unexpectedly increase the array's LBS, risking corruption
of existing partition data.

Simply rejecting disks with a larger LBS is too inflexible: in some
scenarios only 512-byte-LBS disks are available when the array is
created, but 4k-LBS disks may need to be added later.

Making the LBS configurable is the best way to handle this.
With this patch, md will:
- store and load the LBS field when reading and writing disk metadata.
- introduce a new sysfs attribute, 'md/logical_block_size', for LBS
  configuration.
A future mdadm release should support setting the LBS via the metadata
field at array creation time and via the new sysfs attribute. Although
the kernel allows the LBS to be changed at runtime, users should avoid
modifying it after partitions or filesystems have been created, to
prevent compatibility issues.
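
For illustration, a minimal userspace sketch of the intended sysfs usage
follows. It is only an example: the /sys/block/md0 path, the 4096-byte
value, and the helper program itself are assumptions, not part of this
patch. The write must happen before the array is started, since the
store handler rejects it once a personality is attached.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Hypothetical example: array md0, desired LBS of 4096 bytes. */
        const char *attr = "/sys/block/md0/md/logical_block_size";
        char buf[16] = "";
        int fd;

        /* Set the LBS while the array is not yet running. */
        fd = open(attr, O_WRONLY);
        if (fd < 0 || write(fd, "4096", 4) != 4) {
                perror("set logical_block_size");
                return 1;
        }
        close(fd);

        /* Read the value back to confirm what the array will use. */
        fd = open(attr, O_RDONLY);
        if (fd >= 0 && read(fd, buf, sizeof(buf) - 1) > 0)
                printf("logical_block_size: %s", buf);
        if (fd >= 0)
                close(fd);
        return 0;
}
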
Signed-off-by: Li Nan <linan122@...wei.com>
---
drivers/md/md.h | 1 +
include/uapi/linux/raid/md_p.h | 6 ++-
drivers/md/md-linear.c | 1 +
drivers/md/md.c | 74 ++++++++++++++++++++++++++++++++++
drivers/md/raid0.c | 1 +
drivers/md/raid1.c | 1 +
drivers/md/raid10.c | 1 +
drivers/md/raid5.c | 1 +
8 files changed, 84 insertions(+), 2 deletions(-)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index def808064ad8..96bd10998ae0 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -403,6 +403,7 @@ struct mddev {
sector_t array_sectors; /* exported array size */
int external_size; /* size managed
* externally */
+ unsigned int logical_block_size;
__u64 events;
/* If the last 'event' was simply a clean->dirty transition, and
* we didn't write it to the spares, then it is safe and simple
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index ff47b6f0ba0f..ad1c84e772ba 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -180,7 +180,8 @@ typedef struct mdp_superblock_s {
__u32 delta_disks; /* 15 change in number of raid_disks */
__u32 new_layout; /* 16 new layout */
__u32 new_chunk; /* 17 new chunk size (bytes) */
- __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 18];
+ __u32 logical_block_size; /* same as q->limits->logical_block_size */
+ __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 19];
/*
* Personality information
@@ -291,7 +292,8 @@ struct mdp_superblock_1 {
__le64 resync_offset; /* data before this offset (from data_offset) known to be in sync */
__le32 sb_csum; /* checksum up to devs[max_dev] */
__le32 max_dev; /* size of devs[] array to consider */
- __u8 pad3[64-32]; /* set to 0 when writing */
+ __le32 logical_block_size; /* same as q->limits->logical_block_size */
+ __u8 pad3[64-36]; /* set to 0 when writing */
/* device state information. Indexed by dev_number.
* 2 bytes per device
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 369aed044b40..c197a90a6bd5 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -73,6 +73,7 @@ static int linear_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_hw_sectors = mddev->chunk_sectors;
+ lim.logical_block_size = mddev->logical_block_size;
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 827646b3eb59..cf3d8ff807a7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1335,6 +1335,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, stru
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
mddev->reshape_backwards = 0;
+ mddev->logical_block_size = sb->logical_block_size;
if (mddev->minor_version >= 91) {
mddev->reshape_position = sb->reshape_position;
@@ -1497,6 +1498,7 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->layout = mddev->layout;
sb->chunk_size = mddev->chunk_sectors << 9;
+ sb->logical_block_size = mddev->logical_block_size;
if (mddev->bitmap && mddev->bitmap_info.file == NULL)
sb->state |= (1<<MD_SB_BITMAP_PRESENT);
@@ -1831,6 +1833,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struc
mddev->layout = le32_to_cpu(sb->layout);
mddev->raid_disks = le32_to_cpu(sb->raid_disks);
mddev->dev_sectors = le64_to_cpu(sb->size);
+ mddev->logical_block_size = le32_to_cpu(sb->logical_block_size);
mddev->events = ev1;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.space = 0;
@@ -2040,6 +2043,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
+ sb->logical_block_size = cpu_to_le32(mddev->logical_block_size);
if (test_bit(FailFast, &rdev->flags))
sb->devflags |= FailFast1;
else
@@ -5630,6 +5634,65 @@ static struct md_sysfs_entry md_serialize_policy =
__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
serialize_policy_store);
+static int mddev_set_logical_block_size(struct mddev *mddev,
+ unsigned int lbs)
+{
+ int err = 0;
+ struct queue_limits lim;
+
+ if (blk_validate_block_size(lbs) ||
+ queue_logical_block_size(mddev->gendisk->queue) >= lbs) {
+ pr_err("%s: incompatible logical_block_size, can not set\n",
+ mdname(mddev));
+ return -EINVAL;
+ }
+
+ lim = queue_limits_start_update(mddev->gendisk->queue);
+ if (blk_set_block_size(&lim, lbs, 0))
+ pr_warn("%s: logical_block_size changes, data may be lost\n",
+ mdname(mddev));
+ err = queue_limits_commit_update(mddev->gendisk->queue, &lim);
+ if (err)
+ return err;
+
+ mddev->logical_block_size = lbs;
+ md_update_sb(mddev, 1);
+
+ return 0;
+}
+
+static ssize_t
+lbs_show(struct mddev *mddev, char *page)
+{
+ return sprintf(page, "%u\n", mddev->logical_block_size);
+}
+
+static ssize_t
+lbs_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ unsigned int lbs;
+ int err;
+
+ err = kstrtouint(buf, 10, &lbs);
+ if (err < 0)
+ return err;
+
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+ err = -EBUSY;
+ if (mddev->pers)
+ goto unlock;
+
+ err = mddev_set_logical_block_size(mddev, lbs);
+
+unlock:
+ mddev_unlock(mddev);
+ return err ?: len;
+}
+
+static struct md_sysfs_entry md_logical_block_size =
+__ATTR(logical_block_size, S_IRUGO|S_IWUSR, lbs_show, lbs_store);
static struct attribute *md_default_attrs[] = {
&md_level.attr,
@@ -5662,6 +5725,7 @@ static struct attribute *md_redundancy_attrs[] = {
&md_scan_mode.attr,
&md_last_scan_mode.attr,
&md_mismatches.attr,
+ &md_logical_block_size.attr,
&md_sync_min.attr,
&md_sync_max.attr,
&md_sync_speed.attr,
@@ -5760,6 +5824,7 @@ int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim,
unsigned int flags)
{
struct md_rdev *rdev;
+ unsigned int lbs = mddev->logical_block_size;
rdev_for_each(rdev, mddev) {
queue_limits_stack_bdev(lim, rdev->bdev, rdev->data_offset,
@@ -5768,6 +5833,14 @@ int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim,
!queue_limits_stack_integrity_bdev(lim, rdev->bdev))
return -EINVAL;
}
+ if (lbs) {
+ if (lbs != queue_logical_block_size(mddev->gendisk->queue))
+ pr_warn("%s: logical_block_size is changed, before: %u, now: %u\n",
+ mdname(mddev), lbs,
+ queue_logical_block_size(mddev->gendisk->queue));
+ } else {
+ mddev->logical_block_size = queue_logical_block_size(mddev->gendisk->queue);
+ }
return 0;
}
@@ -6377,6 +6450,7 @@ static void md_clean(struct mddev *mddev)
mddev->chunk_sectors = 0;
mddev->ctime = mddev->utime = 0;
mddev->layout = 0;
+ mddev->logical_block_size = 0;
mddev->max_disks = 0;
mddev->events = 0;
mddev->can_decrease_events = 0;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 70bcc3cdf2cd..83e330e30fc2 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -382,6 +382,7 @@ static int raid0_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_hw_sectors = mddev->chunk_sectors;
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+ lim.logical_block_size = mddev->logical_block_size;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * mddev->raid_disks;
lim.features |= BLK_FEAT_ATOMIC_WRITES;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 10ea3af40991..5d8a718af9b0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -3217,6 +3217,7 @@ static int raid1_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
+ lim.logical_block_size = mddev->logical_block_size;
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 15b9ae5bf84d..085188f85785 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4016,6 +4016,7 @@ static int raid10_set_queue_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
+ lim.logical_block_size = mddev->logical_block_size;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
lim.features |= BLK_FEAT_ATOMIC_WRITES;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5c79429acc64..b11e0ae25c2f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7736,6 +7736,7 @@ static int raid5_set_limits(struct mddev *mddev)
stripe = roundup_pow_of_two(data_disks * (mddev->chunk_sectors << 9));
md_init_stacking_limits(&lim);
+ lim.logical_block_size = mddev->logical_block_size;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * (conf->raid_disks - conf->max_degraded);
lim.features |= BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE;
--
2.39.2