Message-Id: <20190408073308.3082-1-hans@owltronix.com>
Date: Mon, 8 Apr 2019 09:33:08 +0200
From: hans@...tronix.com
To: Matias Bjorling <mb@...htnvm.io>
Cc: javier@...igon.com, Igor Konopko <igor.j.konopko@...el.com>,
Klaus Jensen <klaus.jensen@...xlabs.com>,
linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
Hans Holmberg <hans.holmberg@...xlabs.com>
Subject: [PATCH] lightnvm: pblk: fix crash in pblk_end_partial_read due to multipage bvecs
From: Hans Holmberg <hans.holmberg@...xlabs.com>

Ever since '07173c3ec276 ("block: enable multipage bvecs")' we
need to handle bios with multipage bvecs in pblk.

Currently, a multipage bvec results in a crash[1].
Fix this by using bvec iterators instead of direct bvec indexing.

Also add a dcache flush, for the same reasons as in:
'6e6e811d747b ("block: Add missing flush_dcache_page() call")'

[1] https://github.com/OpenChannelSSD/linux/issues/61

Reported-by: Klaus Jensen <klaus.jensen@...xlabs.com>
Signed-off-by: Hans Holmberg <hans.holmberg@...xlabs.com>
---
It ain't pretty, but let's fix the breakage while waiting for Igor's
partial read cleanup to be ready.
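
For context, the crux of the change is to stop indexing bio->bi_io_vec[]
directly and instead walk both bios with a bvec_iter, advancing by one
4k sector at a time. Below is a minimal, illustrative sketch of that
pattern; it is not pblk code. The helper name copy_one_sector() and the
SECTOR_COPY_SIZE constant are made up for illustration, and the sketch
assumes the payload is page-aligned within the bio (as pblk's 4k
sectors are).

#include <linux/bio.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/string.h>

#define SECTOR_COPY_SIZE 4096	/* illustrative; pblk uses PBLK_EXPOSED_PAGE_SIZE */

/*
 * Copy one sector-sized payload from the current position of @src_iter
 * in @src_bio to the current position of @dst_iter in @dst_bio, then
 * advance both iterators. Hypothetical helper, not part of the patch.
 */
static void copy_one_sector(struct bio *dst_bio, struct bvec_iter *dst_iter,
			    struct bio *src_bio, struct bvec_iter *src_iter)
{
	/*
	 * bio_iter_iovec() yields a bvec clamped to the current page,
	 * so mapping bv_page stays valid even when the underlying bvec
	 * spans several pages.
	 */
	struct bio_vec dst_bv = bio_iter_iovec(dst_bio, *dst_iter);
	struct bio_vec src_bv = bio_iter_iovec(src_bio, *src_iter);
	void *src_p, *dst_p;

	src_p = kmap_atomic(src_bv.bv_page);
	dst_p = kmap_atomic(dst_bv.bv_page);

	memcpy(dst_p + dst_bv.bv_offset,
	       src_p + src_bv.bv_offset,
	       SECTOR_COPY_SIZE);

	kunmap_atomic(dst_p);
	kunmap_atomic(src_p);

	/* The destination page was written through a kernel mapping. */
	flush_dcache_page(dst_bv.bv_page);

	bio_advance_iter(src_bio, src_iter, SECTOR_COPY_SIZE);
	bio_advance_iter(dst_bio, dst_iter, SECTOR_COPY_SIZE);
}

A caller would initialize both iterators with BVEC_ITER_ALL_INIT (as the
patch does) and call such a helper once per hole; for sectors that were
already satisfied from cache, only the original bio's iterator needs
advancing, which is what the loop in the patch does.
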
drivers/lightnvm/pblk-read.c | 50 ++++++++++++++++++++----------------
1 file changed, 28 insertions(+), 22 deletions(-)
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index f08f7d9bd3be..27f8a76d8bd8 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -230,14 +230,14 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
 	struct pblk_sec_meta *meta;
 	struct bio *new_bio = rqd->bio;
 	struct bio *bio = pr_ctx->orig_bio;
-	struct bio_vec src_bv, dst_bv;
 	void *meta_list = rqd->meta_list;
-	int bio_init_idx = pr_ctx->bio_init_idx;
 	unsigned long *read_bitmap = pr_ctx->bitmap;
+	struct bvec_iter orig_iter = BVEC_ITER_ALL_INIT;
+	struct bvec_iter new_iter = BVEC_ITER_ALL_INIT;
 	int nr_secs = pr_ctx->orig_nr_secs;
 	int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
 	void *src_p, *dst_p;
-	int hole, i;
+	int bit, i;
 
 	if (unlikely(nr_holes == 1)) {
 		struct ppa_addr ppa;
@@ -256,33 +256,39 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
 
 	/* Fill the holes in the original bio */
 	i = 0;
-	hole = find_first_zero_bit(read_bitmap, nr_secs);
-	do {
-		struct pblk_line *line;
+	for (bit = 0; bit < nr_secs; bit++) {
+		if (!test_bit(bit, read_bitmap)) {
+			struct bio_vec dst_bv, src_bv;
+			struct pblk_line *line;
 
-		line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
-		kref_put(&line->ref, pblk_line_put);
+			line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
+			kref_put(&line->ref, pblk_line_put);
 
-		meta = pblk_get_meta(pblk, meta_list, hole);
-		meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
+			meta = pblk_get_meta(pblk, meta_list, bit);
+			meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
 
-		src_bv = new_bio->bi_io_vec[i++];
-		dst_bv = bio->bi_io_vec[bio_init_idx + hole];
+			dst_bv = bio_iter_iovec(bio, orig_iter);
+			src_bv = bio_iter_iovec(new_bio, new_iter);
 
-		src_p = kmap_atomic(src_bv.bv_page);
-		dst_p = kmap_atomic(dst_bv.bv_page);
+			src_p = kmap_atomic(src_bv.bv_page);
+			dst_p = kmap_atomic(dst_bv.bv_page);
 
-		memcpy(dst_p + dst_bv.bv_offset,
-			src_p + src_bv.bv_offset,
-			PBLK_EXPOSED_PAGE_SIZE);
+			memcpy(dst_p + dst_bv.bv_offset,
+				src_p + src_bv.bv_offset,
+				PBLK_EXPOSED_PAGE_SIZE);
 
-		kunmap_atomic(src_p);
-		kunmap_atomic(dst_p);
+			kunmap_atomic(src_p);
+			kunmap_atomic(dst_p);
 
-		mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
+			flush_dcache_page(dst_bv.bv_page);
+			mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
 
-		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
-	} while (hole < nr_secs);
+			bio_advance_iter(new_bio, &new_iter,
+					PBLK_EXPOSED_PAGE_SIZE);
+			i++;
+		}
+		bio_advance_iter(bio, &orig_iter, PBLK_EXPOSED_PAGE_SIZE);
+	}
 
 	bio_put(new_bio);
 	kfree(pr_ctx);
--
2.17.1