Message-ID: <xovdviolzjnf6bwbvw4heskfmeasrku7s32xzzljtyomv55do3@s7uy5jb5itqq>
Date: Thu, 13 Nov 2025 13:44:57 +0900
From: Sergey Senozhatsky <senozhatsky@...omium.org>
To: Yuwen Chen <ywen.chen@...mail.com>
Cc: axboe@...nel.dk, akpm@...ux-foundation.org, bgeffon@...gle.com,
licayy@...look.com, linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, liumartin@...gle.com, minchan@...nel.org, richardycc@...gle.com,
Sergey Senozhatsky <senozhatsky@...omium.org>
Subject: Re: [PATCH v4] zram: Implement multi-page write-back
On (25/11/13 11:20), Sergey Senozhatsky wrote:
> On (25/11/06 09:49), Yuwen Chen wrote:
> > +static struct zram_wb_request *zram_writeback_next_request(struct zram_wb_request *pool,
> > + int pool_cnt, int *cnt_off)
> > +{
> > + struct zram_wb_request *req = NULL;
> > + int i = 0;
> > +
> > + for (i = *cnt_off; i < pool_cnt + *cnt_off; i++) {
> > + req = &pool[i % pool_cnt];
> > + if (!req->page) {
> > + /* This memory should be freed by the caller. */
> > + req->page = alloc_page(GFP_KERNEL);
> > + if (!req->page)
> > + continue;
> > + }
> > +
> > + if (!test_and_set_bit(ZRAM_WB_REQUEST_ALLOCATED, &req->flags)) {
> > + *cnt_off = (i + 1) % pool_cnt;
> > + return req;
> > + }
> > + }
> > + return NULL;
> > +}
>
> So I wonder if things will look simpler (is this the word I'm looking
> for?) if you just have two lists for requests: one list for completed/idle
> requests and one list for in-flight requests (and you move requests
> around accordingly). Then you don't need to iterate the pool and check
> flags, you just can check list_empty(&idle_requests) and take the first
> (front) element.
OK, so below is roughly what I want it to look like.
It's closer (in the sense of logic and approach) to what we do for
post-processing, and I think it's easier to understand.
1. factored out a new structure zram_wb_ctl, similar to zram_pp_ctl;
2. zram_wb_ctl is initialized outside of zram_writeback_slots(),
because this function is purely about pp-slot writeback. wb_ctl
is passed to zram_writeback_slots() as an argument, just like pp_ctl;
3. requests move between two lists: idle and inflight requests;
4. factored out and unified the wait-for-completion logic;
TODO: writeback_batch_size device attr.
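Roughly, I imagine that knob looking something like this (completely
untested sketch: zram->wb_batch_size doesn't exist yet, the upper bound
is arbitrary, and the attr would still need to be added to zram_disk_attrs
and consumed by init_wb_ctl() in place of the hard-coded ZRAM_WB_REQ_CNT):

static ssize_t writeback_batch_size_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);
        u32 val;

        /* reject zero and unreasonably large batch sizes */
        if (kstrtouint(buf, 10, &val) || !val || val > 128)
                return -EINVAL;

        down_write(&zram->init_lock);
        /* wb_batch_size is a placeholder field, not in struct zram yet */
        zram->wb_batch_size = val;
        up_write(&zram->init_lock);

        return len;
}

static ssize_t writeback_batch_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);
        u32 val;

        down_read(&zram->init_lock);
        val = zram->wb_batch_size;
        up_read(&zram->init_lock);

        return sysfs_emit(buf, "%u\n", val);
}

static DEVICE_ATTR_RW(writeback_batch_size);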
Please take a look. Does this make sense to you, does this
work for you?
---
drivers/block/zram/zram_drv.c | 312 ++++++++++++++++++++++++++--------
1 file changed, 244 insertions(+), 68 deletions(-)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index a43074657531..60c3a61c7ee8 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -734,20 +734,198 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
submit_bio(bio);
}
-static int zram_writeback_slots(struct zram *zram, struct zram_pp_ctl *ctl)
-{
- unsigned long blk_idx = 0;
- struct page *page = NULL;
+struct zram_wb_ctl {
+ struct list_head idle_reqs;
+ struct list_head inflight_reqs;
+
+ atomic_t num_inflight;
+ struct completion done;
+ struct blk_plug plug;
+};
+
+struct zram_wb_req {
+ unsigned long blk_idx;
+ struct page *page;
struct zram_pp_slot *pps;
struct bio_vec bio_vec;
struct bio bio;
- int ret = 0, err;
+
+ struct list_head entry;
+};
+
+static void release_wb_req(struct zram_wb_req *req)
+{
+ __free_page(req->page);
+ kfree(req);
+}
+
+static void release_wb_ctl(struct zram_wb_ctl *wb_ctl)
+{
+ if (!wb_ctl)
+ return;
+
+ /* We should never have inflight requests at this point */
+ WARN_ON(!list_empty(&wb_ctl->inflight_reqs));
+
+ while (!list_empty(&wb_ctl->idle_reqs)) {
+ struct zram_wb_req *req;
+
+ req = list_first_entry(&wb_ctl->idle_reqs,
+ struct zram_wb_req, entry);
+ list_del(&req->entry);
+ release_wb_req(req);
+ }
+
+ kfree(wb_ctl);
+}
+
+/* should be a module param */
+#define ZRAM_WB_REQ_CNT (32)
+
+static struct zram_wb_ctl *init_wb_ctl(void)
+{
+ struct zram_wb_ctl *wb_ctl;
+ int i;
+
+ wb_ctl = kmalloc(sizeof(*wb_ctl), GFP_KERNEL);
+ if (!wb_ctl)
+ return NULL;
+
+ INIT_LIST_HEAD(&wb_ctl->idle_reqs);
+ INIT_LIST_HEAD(&wb_ctl->inflight_reqs);
+ atomic_set(&wb_ctl->num_inflight, 0);
+ init_completion(&wb_ctl->done);
+
+ for (i = 0; i < ZRAM_WB_REQ_CNT; i++) {
+ struct zram_wb_req *req;
+
+ req = kmalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ goto release_wb_ctl;
+
+ req->page = alloc_page(GFP_KERNEL);
+ if (!req->page) {
+ kfree(req);
+ goto release_wb_ctl;
+ }
+
+ INIT_LIST_HEAD(&req->entry);
+ list_add(&req->entry, &wb_ctl->idle_reqs);
+ }
+
+ return wb_ctl;
+
+release_wb_ctl:
+ release_wb_ctl(wb_ctl);
+ return NULL;
+}
+
+static int zram_writeback_complete(struct zram *zram, struct zram_wb_req *req)
+{
u32 index;
+ int err;
- page = alloc_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
+ index = req->pps->index;
+ release_pp_slot(zram, req->pps);
+ req->pps = NULL;
+
+ err = blk_status_to_errno(req->bio.bi_status);
+ if (err)
+ return err;
+
+ atomic64_inc(&zram->stats.bd_writes);
+ zram_slot_lock(zram, index);
+ /*
+ * We release the slot lock during writeback, so the slot can change under
+ * us: slot_free(), or slot_free() and reallocation (zram_write_page()).
+ * In both cases slot loses ZRAM_PP_SLOT. No concurrent post-processing can
+ * set ZRAM_PP_SLOT on such slots until current post-processing
+ * finishes.
+ */
+ if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
+ goto out;
+
+ zram_free_page(zram, index);
+ zram_set_flag(zram, index, ZRAM_WB);
+ zram_set_handle(zram, index, req->blk_idx);
+ atomic64_inc(&zram->stats.pages_stored);
+ spin_lock(&zram->wb_limit_lock);
+ if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
+ zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
+ spin_unlock(&zram->wb_limit_lock);
+
+out:
+ zram_slot_unlock(zram, index);
+ return 0;
+}
+
+static void zram_writeback_endio(struct bio *bio)
+{
+ struct zram_wb_ctl *wb_ctl = bio->bi_private;
+
+ if (atomic_dec_return(&wb_ctl->num_inflight) == 0)
+ complete(&wb_ctl->done);
+}
+
+static void zram_submit_wb_request(struct zram_wb_ctl *wb_ctl,
+ struct zram_wb_req *req)
+{
+ atomic_inc(&wb_ctl->num_inflight);
+ list_add_tail(&req->entry, &wb_ctl->inflight_reqs);
+ submit_bio(&req->bio);
+}
+
+static struct zram_wb_req *select_idle_req(struct zram_wb_ctl *wb_ctl)
+{
+ struct zram_wb_req *req = NULL;
+
+ if (!list_empty(&wb_ctl->idle_reqs)) {
+ req = list_first_entry(&wb_ctl->idle_reqs,
+ struct zram_wb_req, entry);
+ list_del(&req->entry);
+ }
+ return req;
+}
+
+static int zram_wb_wait_for_completion(struct zram *zram,
+ struct zram_wb_ctl *wb_ctl)
+{
+ int ret = 0;
+
+ /* Only wait when there are bios in flight, but always drain completions */
+ if (atomic_read(&wb_ctl->num_inflight))
+ wait_for_completion_io(&wb_ctl->done);
+
+ reinit_completion(&wb_ctl->done);
+
+ while (!list_empty(&wb_ctl->inflight_reqs)) {
+ struct zram_wb_req *req;
+ int err;
+
+ req = list_first_entry(&wb_ctl->inflight_reqs,
+ struct zram_wb_req, entry);
+ list_move(&req->entry, &wb_ctl->idle_reqs);
+
+ err = zram_writeback_complete(zram, req);
+ if (err)
+ ret = err;
+ }
+
+ return ret;
+}
+
+static int zram_writeback_slots(struct zram *zram,
+ struct zram_wb_ctl *wb_ctl,
+ struct zram_pp_ctl *ctl)
+{
+ struct zram_wb_req *req = NULL;
+ unsigned long blk_idx = 0;
+ struct zram_pp_slot *pps;
+ int ret = 0, err;
+ u32 index = 0;
+
+ blk_start_plug(&wb_ctl->plug);
while ((pps = select_pp_slot(ctl))) {
spin_lock(&zram->wb_limit_lock);
if (zram->wb_limit_enable && !zram->bd_wb_limit) {
@@ -757,15 +932,35 @@ static int zram_writeback_slots(struct zram *zram, struct zram_pp_ctl *ctl)
}
spin_unlock(&zram->wb_limit_lock);
+ while (!req) {
+ req = select_idle_req(wb_ctl);
+ if (req)
+ break;
+
+ blk_finish_plug(&wb_ctl->plug);
+ err = zram_wb_wait_for_completion(zram, wb_ctl);
+ blk_start_plug(&wb_ctl->plug);
+ /*
+ * BIO errors are not fatal, we continue and simply
+ * attempt to writeback the remaining objects (pages).
+ * At the same time we need to signal user-space that
+ * some writes (at least one, but also could be all of
+ * them) were not successful and we do so by returning
+ * the most recent BIO error.
+ */
+ if (err)
+ ret = err;
+ }
+
if (!blk_idx) {
blk_idx = alloc_block_bdev(zram);
if (!blk_idx) {
ret = -ENOSPC;
break;
}
}
index = pps->index;
zram_slot_lock(zram, index);
/*
* scan_slots() sets ZRAM_PP_SLOT and relases slot lock, so
@@ -775,67 +969,43 @@ static int zram_writeback_slots(struct zram *zram, struct zram_pp_ctl *ctl)
*/
if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
goto next;
- if (zram_read_from_zspool(zram, page, index))
+ if (zram_read_from_zspool(zram, req->page, index))
goto next;
zram_slot_unlock(zram, index);
- bio_init(&bio, zram->bdev, &bio_vec, 1,
+ req->blk_idx = blk_idx;
+ req->pps = pps;
+ bio_init(&req->bio, zram->bdev, &req->bio_vec, 1,
REQ_OP_WRITE | REQ_SYNC);
- bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
- __bio_add_page(&bio, page, PAGE_SIZE, 0);
-
- /*
- * XXX: A single page IO would be inefficient for write
- * but it would be not bad as starter.
- */
- err = submit_bio_wait(&bio);
- if (err) {
- release_pp_slot(zram, pps);
- /*
- * BIO errors are not fatal, we continue and simply
- * attempt to writeback the remaining objects (pages).
- * At the same time we need to signal user-space that
- * some writes (at least one, but also could be all of
- * them) were not successful and we do so by returning
- * the most recent BIO error.
- */
- ret = err;
- continue;
- }
+ req->bio.bi_iter.bi_sector = req->blk_idx * (PAGE_SIZE >> 9);
+ req->bio.bi_end_io = zram_writeback_endio;
+ req->bio.bi_private = wb_ctl;
+ __bio_add_page(&req->bio, req->page, PAGE_SIZE, 0);
- atomic64_inc(&zram->stats.bd_writes);
- zram_slot_lock(zram, index);
- /*
- * Same as above, we release slot lock during writeback so
- * slot can change under us: slot_free() or slot_free() and
- * reallocation (zram_write_page()). In both cases slot loses
- * ZRAM_PP_SLOT flag. No concurrent post-processing can set
- * ZRAM_PP_SLOT on such slots until current post-processing
- * finishes.
- */
- if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
- goto next;
-
- zram_free_page(zram, index);
- zram_set_flag(zram, index, ZRAM_WB);
- zram_set_handle(zram, index, blk_idx);
+ zram_submit_wb_request(wb_ctl, req);
blk_idx = 0;
- atomic64_inc(&zram->stats.pages_stored);
- spin_lock(&zram->wb_limit_lock);
- if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
- zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
- spin_unlock(&zram->wb_limit_lock);
+ req = NULL;
+ continue;
+
next:
zram_slot_unlock(zram, index);
release_pp_slot(zram, pps);
-
cond_resched();
}
if (blk_idx)
free_block_bdev(zram, blk_idx);
- if (page)
- __free_page(page);
+ /*
+ * Selected idle req, but never submitted it due to some error or
+ * wb limit.
+ */
+ if (req)
+ release_wb_req(req);
+
+ blk_finish_plug(&wb_ctl->plug);
+ err = zram_wb_wait_for_completion(zram, wb_ctl);
+ if (err)
+ ret = err;
return ret;
}
@@ -948,7 +1116,8 @@ static ssize_t writeback_store(struct device *dev,
struct zram *zram = dev_to_zram(dev);
u64 nr_pages = zram->disksize >> PAGE_SHIFT;
unsigned long lo = 0, hi = nr_pages;
- struct zram_pp_ctl *ctl = NULL;
+ struct zram_pp_ctl *pp_ctl = NULL;
+ struct zram_wb_ctl *wb_ctl = NULL;
char *args, *param, *val;
ssize_t ret = len;
int err, mode = 0;
@@ -970,8 +1139,14 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- ctl = init_pp_ctl();
- if (!ctl) {
+ pp_ctl = init_pp_ctl();
+ if (!pp_ctl) {
+ ret = -ENOMEM;
+ goto release_init_lock;
+ }
+
+ wb_ctl = init_wb_ctl();
+ if (!wb_ctl) {
ret = -ENOMEM;
goto release_init_lock;
}
@@ -1000,7 +1175,7 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
break;
}
@@ -1011,7 +1186,7 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
break;
}
@@ -1022,7 +1197,7 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
continue;
}
@@ -1033,17 +1208,18 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
continue;
}
}
- err = zram_writeback_slots(zram, ctl);
+ err = zram_writeback_slots(zram, wb_ctl, pp_ctl);
if (err)
ret = err;
release_init_lock:
- release_pp_ctl(zram, ctl);
+ release_pp_ctl(zram, pp_ctl);
+ release_wb_ctl(wb_ctl);
atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
--
2.51.2.1041.gc1ab5b90ca-goog