Message-Id: <20170918090911.770828026@linuxfoundation.org>
Date: Mon, 18 Sep 2017 11:10:19 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        stable@...r.kernel.org,
        "Jonathan G. Underwood" <jonathan.underwood@...il.com>,
        Ming Lei <ming.lei@...hat.com>, Shaohua Li <shli@...com>
Subject: [PATCH 4.13 51/52] md/raid1/10: reset bio allocated from mempool

4.13-stable review patch. If anyone has any objections, please let me know.

------------------

From: Shaohua Li <shli@...com>

commit 208410b546207cfc4c832635fa46419cfa86b4cd upstream.

Data allocated from a mempool doesn't always get initialized: when an
element is reused rather than freshly allocated, it comes back in
whatever state it was freed in. In the raid1/10 case, we must
reinitialize the bios.
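
To make the failure mode concrete, here is a minimal userspace analogy
(toy code, not the kernel mempool API): a one-slot free-list pool whose
constructor runs only for fresh allocations, so a recycled element comes
back exactly as its previous user left it.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-in for a mempool: the "constructor" (zeroing) runs only
 * when a fresh element is created; a recycled element is handed back
 * untouched, which is the behavior the patch has to compensate for. */
static void *freelist;

static void *pool_alloc(size_t size)
{
        void *p = freelist;

        if (p) {                        /* reuse: no re-initialization */
                freelist = NULL;
                return p;
        }
        p = malloc(size);               /* fresh: construct (zero) it */
        if (p)
                memset(p, 0, size);
        return p;
}

static void pool_free(void *p)
{
        freelist = p;                   /* freed as-is, no cleanup */
}

struct elem {
        int state;                      /* imagine per-bio state here */
};

int main(void)
{
        struct elem *e = pool_alloc(sizeof(*e));

        printf("fresh:  state=%d\n", e->state); /* prints 0 */
        e->state = 42;                          /* dirty it, then free */
        pool_free(e);

        e = pool_alloc(sizeof(*e));             /* same element, reused */
        printf("reused: state=%d\n", e->state); /* prints 42: stale */
        free(e);
        return 0;
}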

Reported-by: Jonathan G. Underwood <jonathan.underwood@...il.com>
Fixes: f0250618361d ("md: raid10: don't use bio's vec table to manage resync pages")
Fixes: 98d30c5812c3 ("md: raid1: don't use bio's vec table to manage resync pages")
Cc: Ming Lei <ming.lei@...hat.com>
Signed-off-by: Shaohua Li <shli@...com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
 drivers/md/raid1.c  | 19 ++++++++++++++++++-
 drivers/md/raid10.c | 35 ++++++++++++++++++++++++++++++++---
 2 files changed, 50 insertions(+), 4 deletions(-)

--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2564,6 +2564,23 @@ static int init_resync(struct r1conf *co
         return 0;
 }
 
+static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
+{
+        struct r1bio *r1bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
+        struct resync_pages *rps;
+        struct bio *bio;
+        int i;
+
+        for (i = conf->poolinfo->raid_disks; i--; ) {
+                bio = r1bio->bios[i];
+                rps = bio->bi_private;
+                bio_reset(bio);
+                bio->bi_private = rps;
+        }
+        r1bio->master_bio = NULL;
+        return r1bio;
+}
+
 /*
  * perform a "sync" on one "block"
  *
@@ -2649,7 +2666,7 @@ static sector_t raid1_sync_request(struc
         bitmap_cond_end_sync(mddev->bitmap, sector_nr,
                 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
 
-        r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
+        r1_bio = raid1_alloc_init_r1buf(conf);
 
         raise_barrier(conf, sector_nr);
 
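The save/reset/restore dance above exists because bio_reset() wipes
almost every field of the bio, including ->bi_private, which for these
pool elements points at the resync_pages bookkeeping attached when the
element was first constructed. Roughly, the 4.13-era bio_reset() looks
like the following paraphrase (shown for orientation only; consult the
block layer source for the authoritative version):

/* Paraphrase of bio_reset() as of v4.13 (not verbatim): everything up
 * to the BIO_RESET_BYTES boundary is zeroed, ->bi_private included,
 * while the vec table and mempool linkage beyond it survive. */
void bio_reset(struct bio *bio)
{
        unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

        bio_uninit(bio);
        memset(bio, 0, BIO_RESET_BYTES);        /* clears ->bi_private */
        bio->bi_flags = flags;
        atomic_set(&bio->__bi_remaining, 1);
}
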
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2798,6 +2798,35 @@ static int init_resync(struct r10conf *c
         return 0;
 }
 
+static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
+{
+        struct r10bio *r10bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+        struct resync_pages *rp;
+        struct bio *bio;
+        int nalloc;
+        int i;
+
+        if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
+            test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
+                nalloc = conf->copies; /* resync */
+        else
+                nalloc = 2; /* recovery */
+
+        for (i = 0; i < nalloc; i++) {
+                bio = r10bio->devs[i].bio;
+                rp = bio->bi_private;
+                bio_reset(bio);
+                bio->bi_private = rp;
+                bio = r10bio->devs[i].repl_bio;
+                if (bio) {
+                        rp = bio->bi_private;
+                        bio_reset(bio);
+                        bio->bi_private = rp;
+                }
+        }
+        return r10bio;
+}
+
 /*
  * perform a "sync" on one "block"
  *
@@ -3027,7 +3056,7 @@ static sector_t raid10_sync_request(stru
                         atomic_inc(&mreplace->nr_pending);
                 rcu_read_unlock();
 
-                r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+                r10_bio = raid10_alloc_init_r10buf(conf);
                 r10_bio->state = 0;
                 raise_barrier(conf, rb2 != NULL);
                 atomic_set(&r10_bio->remaining, 0);
@@ -3236,7 +3265,7 @@ static sector_t raid10_sync_request(stru
                 }
                 if (sync_blocks < max_sync)
                         max_sync = sync_blocks;
-                r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+                r10_bio = raid10_alloc_init_r10buf(conf);
                 r10_bio->state = 0;
 
                 r10_bio->mddev = mddev;
@@ -4360,7 +4389,7 @@ static sector_t reshape_request(struct m
 
 read_more:
         /* Now schedule reads for blocks from sector_nr to last */
-        r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+        r10_bio = raid10_alloc_init_r10buf(conf);
         r10_bio->state = 0;
         raise_barrier(conf, sectors_done != 0);
         atomic_set(&r10_bio->remaining, 0);
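
Taken together, the fix is the combination of the two ideas above: hand
out pool elements through a helper that wipes each bio but preserves its
long-lived ->bi_private attachment. A self-contained userspace sketch of
that idiom (hypothetical toy types, not kernel code):

#include <assert.h>
#include <string.h>

/* Hypothetical stand-ins: a "bio" whose ->private points at long-lived
 * bookkeeping set up once, when the pool element was first built. */
struct toy_bio {
        int dirty_state;        /* goes stale across pool reuse */
        void *private;          /* must survive the reset */
};

/* The fix's idiom: wipe the object, but save and restore the one field
 * that carries state owned by the element's original constructor. */
static void toy_bio_reset(struct toy_bio *bio)
{
        void *rp = bio->private;        /* save */

        memset(bio, 0, sizeof(*bio));   /* analogous to bio_reset() */
        bio->private = rp;              /* restore */
}

int main(void)
{
        int bookkeeping = 1234;
        struct toy_bio bio = { .private = &bookkeeping };

        /* Simulate a reused pool element: stale state from a prior user. */
        bio.dirty_state = 42;

        toy_bio_reset(&bio);
        assert(bio.dirty_state == 0);           /* stale state cleared */
        assert(bio.private == &bookkeeping);    /* attachment preserved */
        return 0;
}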