Message-ID: <CAP-5=fWQvjdOV=TgdGrScGJ9Qe+eqRfh5jM8i3uiJeQSi9Yt3w@mail.gmail.com>
Date: Tue, 19 Mar 2024 13:03:17 -0700
From: Ian Rogers <irogers@...gle.com>
To: Kuan-Wei Chiu <visitorckw@...il.com>
Cc: colyli@...e.de, kent.overstreet@...ux.dev, msakai@...hat.com,
peterz@...radead.org, mingo@...hat.com, acme@...nel.org, namhyung@...nel.org,
akpm@...ux-foundation.org, bfoster@...hat.com, mark.rutland@....com,
alexander.shishkin@...ux.intel.com, jolsa@...nel.org, adrian.hunter@...el.com,
jserv@...s.ncku.edu.tw, linux-bcache@...r.kernel.org,
dm-devel@...ts.linux.dev, linux-bcachefs@...r.kernel.org,
linux-perf-users@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 13/13] bcachefs: Remove heap-related macros and switch to
generic min_heap
On Tue, Mar 19, 2024 at 11:01 AM Kuan-Wei Chiu <visitorckw@...il.com> wrote:
>
> Drop the heap-related macros from bcachefs and replace them with the
> generic min_heap implementation from include/linux. This change
> improves code readability by using functions instead of macros.
>
> Link: https://lkml.kernel.org/ioyfizrzq7w7mjrqcadtzsfgpuntowtjdw5pgn4qhvsdp4mqqg@nrlek5vmisbu
> Signed-off-by: Kuan-Wei Chiu <visitorckw@...il.com>
> ---
> fs/bcachefs/clock.c | 53 +++++++++++-----
> fs/bcachefs/clock_types.h | 2 +-
> fs/bcachefs/ec.c | 99 ++++++++++++++++++-----------
> fs/bcachefs/ec_types.h | 2 +-
> fs/bcachefs/util.h | 127 +++-----------------------------------
> 5 files changed, 109 insertions(+), 174 deletions(-)
>
> diff --git a/fs/bcachefs/clock.c b/fs/bcachefs/clock.c
> index 363644451106..7a7d13f7a629 100644
> --- a/fs/bcachefs/clock.c
> +++ b/fs/bcachefs/clock.c
> @@ -6,16 +6,29 @@
> #include <linux/kthread.h>
> #include <linux/preempt.h>
>
> -static inline long io_timer_cmp(io_timer_heap *h,
> - struct io_timer *l,
> - struct io_timer *r)
> +static inline bool io_timer_cmp(const void *l, const void *r, void *args)
Should args here and below be marked with the attribute __always_unused?
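E.g. something along these lines (just a sketch of the annotation; the
callback body is unchanged from your patch):

/*
 * Sketch: same comparison callback as in the patch, with the unused
 * context argument annotated via __always_unused.
 */
static inline bool io_timer_cmp(const void *l, const void *r,
				void *args __always_unused)
{
	struct io_timer *_l = (struct io_timer *)l;
	struct io_timer *_r = (struct io_timer *)r;

	return _l->expire >= _r->expire;
}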
Thanks,
Ian
> {
> - return l->expire - r->expire;
> + struct io_timer *_l = (struct io_timer *)l;
> + struct io_timer *_r = (struct io_timer *)r;
> +
> + return _l->expire >= _r->expire;
> +}
> +
> +static inline void io_timer_swp(void *l, void *r, void *args)
> +{
> + struct io_timer *_l = (struct io_timer *)l;
> + struct io_timer *_r = (struct io_timer *)r;
> +
> + swap(*_l, *_r);
> }
>
> void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
> {
> size_t i;
> + const struct min_heap_callbacks callbacks = {
> + .less = io_timer_cmp,
> + .swp = io_timer_swp,
> + };
>
> spin_lock(&clock->timer_lock);
>
> @@ -26,11 +39,11 @@ void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
> return;
> }
>
> - for (i = 0; i < clock->timers.used; i++)
> - if (clock->timers.data[i] == timer)
> + for (i = 0; i < clock->timers.heap.nr; i++)
> + if (min_heap_peek(&clock->timers)[i] == timer)
> goto out;
>
> - BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp, NULL));
> + BUG_ON(!min_heap_push(&clock->timers, &timer, &callbacks, NULL));
> out:
> spin_unlock(&clock->timer_lock);
> }
> @@ -38,12 +51,16 @@ void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
> void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
> {
> size_t i;
> + const struct min_heap_callbacks callbacks = {
> + .less = io_timer_cmp,
> + .swp = io_timer_swp,
> + };
>
> spin_lock(&clock->timer_lock);
>
> - for (i = 0; i < clock->timers.used; i++)
> - if (clock->timers.data[i] == timer) {
> - heap_del(&clock->timers, i, io_timer_cmp, NULL);
> + for (i = 0; i < clock->timers.heap.nr; i++)
> + if (min_heap_peek(&clock->timers)[i] == timer) {
> + min_heap_pop(&clock->timers, &callbacks, NULL);
> break;
> }
>
> @@ -131,12 +148,16 @@ static struct io_timer *get_expired_timer(struct io_clock *clock,
> unsigned long now)
> {
> struct io_timer *ret = NULL;
> + const struct min_heap_callbacks callbacks = {
> + .less = io_timer_cmp,
> + .swp = io_timer_swp,
> + };
>
> spin_lock(&clock->timer_lock);
>
> - if (clock->timers.used &&
> - time_after_eq(now, clock->timers.data[0]->expire))
> - heap_pop(&clock->timers, ret, io_timer_cmp, NULL);
> + if (clock->timers.heap.nr &&
> + time_after_eq(now, min_heap_peek(&clock->timers)[0]->expire))
> + min_heap_pop(&clock->timers, &callbacks, NULL);
>
> spin_unlock(&clock->timer_lock);
>
> @@ -161,10 +182,10 @@ void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
> spin_lock(&clock->timer_lock);
> now = atomic64_read(&clock->now);
>
> - for (i = 0; i < clock->timers.used; i++)
> + for (i = 0; i < clock->timers.heap.nr; i++)
> prt_printf(out, "%ps:\t%li\n",
> - clock->timers.data[i]->fn,
> - clock->timers.data[i]->expire - now);
> + min_heap_peek(&clock->timers)[i]->fn,
> + min_heap_peek(&clock->timers)[i]->expire - now);
> spin_unlock(&clock->timer_lock);
> --out->atomic;
> }
> diff --git a/fs/bcachefs/clock_types.h b/fs/bcachefs/clock_types.h
> index 5fae0012d808..b02b24b9d74f 100644
> --- a/fs/bcachefs/clock_types.h
> +++ b/fs/bcachefs/clock_types.h
> @@ -23,7 +23,7 @@ struct io_timer {
> /* Amount to buffer up on a percpu counter */
> #define IO_CLOCK_PCPU_SECTORS 128
>
> -typedef HEAP(struct io_timer *) io_timer_heap;
> +typedef MIN_HEAP(struct io_timer *, io_timer_heap) io_timer_heap;
>
> struct io_clock {
> atomic64_t now;
> diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
> index b98e2c2b8bf0..7b6a31237503 100644
> --- a/fs/bcachefs/ec.c
> +++ b/fs/bcachefs/ec.c
> @@ -860,14 +860,14 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
> {
> ec_stripes_heap n, *h = &c->ec_stripes_heap;
>
> - if (idx >= h->size) {
> + if (idx >= h->heap.size) {
> if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
> return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
>
> mutex_lock(&c->ec_stripes_heap_lock);
> - if (n.size > h->size) {
> - memcpy(n.data, h->data, h->used * sizeof(h->data[0]));
> - n.used = h->used;
> + if (n.heap.size > h->heap.size) {
> + memcpy(min_heap_peek(&n), min_heap_peek(h), h->heap.nr * sizeof(*min_heap_peek(h)));
> + n.heap.nr = h->heap.nr;
> swap(*h, n);
> }
> mutex_unlock(&c->ec_stripes_heap_lock);
> @@ -958,20 +958,21 @@ static u64 stripe_idx_to_delete(struct bch_fs *c)
>
> lockdep_assert_held(&c->ec_stripes_heap_lock);
>
> - if (h->used &&
> - h->data[0].blocks_nonempty == 0 &&
> - !bch2_stripe_is_open(c, h->data[0].idx))
> - return h->data[0].idx;
> + if (h->heap.nr &&
> + min_heap_peek(h)->blocks_nonempty == 0 &&
> + !bch2_stripe_is_open(c, min_heap_peek(h)->idx))
> + return min_heap_peek(h)->idx;
>
> return 0;
> }
>
> -static inline int ec_stripes_heap_cmp(ec_stripes_heap *h,
> - struct ec_stripe_heap_entry l,
> - struct ec_stripe_heap_entry r)
> +static inline bool ec_stripes_heap_cmp(const void *l, const void *r, void *args)
> {
> - return ((l.blocks_nonempty > r.blocks_nonempty) -
> - (l.blocks_nonempty < r.blocks_nonempty));
> + struct ec_stripe_heap_entry *_l = (struct ec_stripe_heap_entry *)l;
> + struct ec_stripe_heap_entry *_r = (struct ec_stripe_heap_entry *)r;
> +
> + return ((_l->blocks_nonempty > _r->blocks_nonempty) >=
> + (_l->blocks_nonempty < _r->blocks_nonempty));
> }
>
> static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
> @@ -979,7 +980,21 @@ static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
> {
> struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap);
>
> - genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i;
> + genradix_ptr(&c->stripes, min_heap_peek(h)[i].idx)->heap_idx = i;
> +}
> +
> +static inline void ec_stripes_heap_swap(void *l, void *r, void *h)
> +{
> + struct ec_stripe_heap_entry *_l = (struct ec_stripe_heap_entry *)l;
> + struct ec_stripe_heap_entry *_r = (struct ec_stripe_heap_entry *)r;
> + ec_stripes_heap *_h = (ec_stripes_heap *)h;
> + size_t i = _l - min_heap_peek(_h);
> + size_t j = _r - min_heap_peek(_h);
> +
> + ec_stripes_heap_set_backpointer(_h, i);
> + ec_stripes_heap_set_backpointer(_h, j);
> +
> + swap(*_l, *_r);
> }
>
> static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
> @@ -987,34 +1002,43 @@ static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
> ec_stripes_heap *h = &c->ec_stripes_heap;
> struct stripe *m = genradix_ptr(&c->stripes, idx);
>
> - BUG_ON(m->heap_idx >= h->used);
> - BUG_ON(h->data[m->heap_idx].idx != idx);
> + BUG_ON(m->heap_idx >= h->heap.nr);
> + BUG_ON(min_heap_peek(h)[m->heap_idx].idx != idx);
> }
>
> void bch2_stripes_heap_del(struct bch_fs *c,
> struct stripe *m, size_t idx)
> {
> + const struct min_heap_callbacks callbacks = {
> + .less = ec_stripes_heap_cmp,
> + .swp = ec_stripes_heap_swap,
> + };
> +
> mutex_lock(&c->ec_stripes_heap_lock);
> heap_verify_backpointer(c, idx);
>
> - heap_del(&c->ec_stripes_heap, m->heap_idx,
> - ec_stripes_heap_cmp,
> - ec_stripes_heap_set_backpointer);
> + min_heap_del(&c->ec_stripes_heap, m->heap_idx, &callbacks, &c->ec_stripes_heap);
> mutex_unlock(&c->ec_stripes_heap_lock);
> }
>
> void bch2_stripes_heap_insert(struct bch_fs *c,
> struct stripe *m, size_t idx)
> {
> + const struct min_heap_callbacks callbacks = {
> + .less = ec_stripes_heap_cmp,
> + .swp = ec_stripes_heap_swap,
> + };
> +
> mutex_lock(&c->ec_stripes_heap_lock);
> - BUG_ON(heap_full(&c->ec_stripes_heap));
> + BUG_ON(min_heap_full(&c->ec_stripes_heap));
>
> - heap_add(&c->ec_stripes_heap, ((struct ec_stripe_heap_entry) {
> + genradix_ptr(&c->stripes, idx)->heap_idx = c->ec_stripes_heap.heap.nr;
> + min_heap_push(&c->ec_stripes_heap, &((struct ec_stripe_heap_entry) {
> .idx = idx,
> .blocks_nonempty = m->blocks_nonempty,
> }),
> - ec_stripes_heap_cmp,
> - ec_stripes_heap_set_backpointer);
> + &callbacks,
> + &c->ec_stripes_heap);
>
> heap_verify_backpointer(c, idx);
> mutex_unlock(&c->ec_stripes_heap_lock);
> @@ -1026,17 +1050,20 @@ void bch2_stripes_heap_update(struct bch_fs *c,
> ec_stripes_heap *h = &c->ec_stripes_heap;
> bool do_deletes;
> size_t i;
> + const struct min_heap_callbacks callbacks = {
> + .less = ec_stripes_heap_cmp,
> + .swp = ec_stripes_heap_swap,
> + };
>
> mutex_lock(&c->ec_stripes_heap_lock);
> heap_verify_backpointer(c, idx);
>
> - h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;
> + min_heap_peek(h)[m->heap_idx].blocks_nonempty = m->blocks_nonempty;
>
> i = m->heap_idx;
> - heap_sift_up(h, i, ec_stripes_heap_cmp,
> - ec_stripes_heap_set_backpointer);
> - heap_sift_down(h, i, ec_stripes_heap_cmp,
> - ec_stripes_heap_set_backpointer);
> +
> + min_heap_sift_up(h, i, &callbacks, &c->ec_stripes_heap);
> + min_heapify(h, i, &callbacks, &c->ec_stripes_heap);
>
> heap_verify_backpointer(c, idx);
>
> @@ -1828,12 +1855,12 @@ static s64 get_existing_stripe(struct bch_fs *c,
> return -1;
>
> mutex_lock(&c->ec_stripes_heap_lock);
> - for (heap_idx = 0; heap_idx < h->used; heap_idx++) {
> + for (heap_idx = 0; heap_idx < h->heap.nr; heap_idx++) {
> /* No blocks worth reusing, stripe will just be deleted: */
> - if (!h->data[heap_idx].blocks_nonempty)
> + if (!min_heap_peek(h)[heap_idx].blocks_nonempty)
> continue;
>
> - stripe_idx = h->data[heap_idx].idx;
> + stripe_idx = min_heap_peek(h)[heap_idx].idx;
>
> m = genradix_ptr(&c->stripes, stripe_idx);
>
> @@ -2159,14 +2186,14 @@ void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
> size_t i;
>
> mutex_lock(&c->ec_stripes_heap_lock);
> - for (i = 0; i < min_t(size_t, h->used, 50); i++) {
> - m = genradix_ptr(&c->stripes, h->data[i].idx);
> + for (i = 0; i < min_t(size_t, h->heap.nr, 50); i++) {
> + m = genradix_ptr(&c->stripes, min_heap_peek(h)[i].idx);
>
> - prt_printf(out, "%zu %u/%u+%u", h->data[i].idx,
> - h->data[i].blocks_nonempty,
> + prt_printf(out, "%zu %u/%u+%u", min_heap_peek(h)[i].idx,
> + min_heap_peek(h)[i].blocks_nonempty,
> m->nr_blocks - m->nr_redundant,
> m->nr_redundant);
> - if (bch2_stripe_is_open(c, h->data[i].idx))
> + if (bch2_stripe_is_open(c, min_heap_peek(h)[i].idx))
> prt_str(out, " open");
> prt_newline(out);
> }
> diff --git a/fs/bcachefs/ec_types.h b/fs/bcachefs/ec_types.h
> index 976426da3a12..2ed67431a81c 100644
> --- a/fs/bcachefs/ec_types.h
> +++ b/fs/bcachefs/ec_types.h
> @@ -36,6 +36,6 @@ struct ec_stripe_heap_entry {
> unsigned blocks_nonempty;
> };
>
> -typedef HEAP(struct ec_stripe_heap_entry) ec_stripes_heap;
> +typedef MIN_HEAP(struct ec_stripe_heap_entry, ec_stripes_heap) ec_stripes_heap;
>
> #endif /* _BCACHEFS_EC_TYPES_H */
> diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
> index 7ffbddb80400..c599dc5276ac 100644
> --- a/fs/bcachefs/util.h
> +++ b/fs/bcachefs/util.h
> @@ -8,6 +8,7 @@
> #include <linux/errno.h>
> #include <linux/freezer.h>
> #include <linux/kernel.h>
> +#include <linux/min_heap.h>
> #include <linux/sched/clock.h>
> #include <linux/llist.h>
> #include <linux/log2.h>
> @@ -54,134 +55,20 @@ static inline size_t buf_pages(void *p, size_t len)
> PAGE_SIZE);
> }
>
> -#define HEAP(type) \
> -struct { \
> - size_t size, used; \
> - type *data; \
> -}
> -
> -#define DECLARE_HEAP(type, name) HEAP(type) name
> -
> #define init_heap(heap, _size, gfp) \
> ({ \
> - (heap)->used = 0; \
> - (heap)->size = (_size); \
> - (heap)->data = kvmalloc((heap)->size * sizeof((heap)->data[0]),\
> - (gfp)); \
> -})
> -
> -#define free_heap(heap) \
> -do { \
> - kvfree((heap)->data); \
> - (heap)->data = NULL; \
> -} while (0)
> -
> -#define heap_set_backpointer(h, i, _fn) \
> -do { \
> - void (*fn)(typeof(h), size_t) = _fn; \
> - if (fn) \
> - fn(h, i); \
> -} while (0)
> -
> -#define heap_swap(h, i, j, set_backpointer) \
> -do { \
> - swap((h)->data[i], (h)->data[j]); \
> - heap_set_backpointer(h, i, set_backpointer); \
> - heap_set_backpointer(h, j, set_backpointer); \
> -} while (0)
> -
> -#define heap_peek(h) \
> -({ \
> - EBUG_ON(!(h)->used); \
> - (h)->data[0]; \
> -})
> -
> -#define heap_full(h) ((h)->used == (h)->size)
> -
> -#define heap_sift_down(h, i, cmp, set_backpointer) \
> -do { \
> - size_t _c, _j = i; \
> - \
> - for (; _j * 2 + 1 < (h)->used; _j = _c) { \
> - _c = _j * 2 + 1; \
> - if (_c + 1 < (h)->used && \
> - cmp(h, (h)->data[_c], (h)->data[_c + 1]) >= 0) \
> - _c++; \
> - \
> - if (cmp(h, (h)->data[_c], (h)->data[_j]) >= 0) \
> - break; \
> - heap_swap(h, _c, _j, set_backpointer); \
> - } \
> -} while (0)
> -
> -#define heap_sift_up(h, i, cmp, set_backpointer) \
> -do { \
> - while (i) { \
> - size_t p = (i - 1) / 2; \
> - if (cmp(h, (h)->data[i], (h)->data[p]) >= 0) \
> - break; \
> - heap_swap(h, i, p, set_backpointer); \
> - i = p; \
> - } \
> -} while (0)
> -
> -#define __heap_add(h, d, cmp, set_backpointer) \
> -({ \
> - size_t _i = (h)->used++; \
> - (h)->data[_i] = d; \
> - heap_set_backpointer(h, _i, set_backpointer); \
> - \
> - heap_sift_up(h, _i, cmp, set_backpointer); \
> - _i; \
> -})
> -
> -#define heap_add(h, d, cmp, set_backpointer) \
> -({ \
> - bool _r = !heap_full(h); \
> - if (_r) \
> - __heap_add(h, d, cmp, set_backpointer); \
> - _r; \
> + void *data = kvmalloc(_size * sizeof(*min_heap_peek(heap)), (gfp));\
> + min_heap_init(heap, data, _size); \
> + min_heap_peek(heap); \
> })
>
> -#define heap_add_or_replace(h, new, cmp, set_backpointer) \
> -do { \
> - if (!heap_add(h, new, cmp, set_backpointer) && \
> - cmp(h, new, heap_peek(h)) >= 0) { \
> - (h)->data[0] = new; \
> - heap_set_backpointer(h, 0, set_backpointer); \
> - heap_sift_down(h, 0, cmp, set_backpointer); \
> - } \
> -} while (0)
>
> -#define heap_del(h, i, cmp, set_backpointer) \
> +#define free_heap(_heap) \
> do { \
> - size_t _i = (i); \
> - \
> - BUG_ON(_i >= (h)->used); \
> - (h)->used--; \
> - if ((_i) < (h)->used) { \
> - heap_swap(h, _i, (h)->used, set_backpointer); \
> - heap_sift_up(h, _i, cmp, set_backpointer); \
> - heap_sift_down(h, _i, cmp, set_backpointer); \
> - } \
> + kvfree((_heap)->heap.data); \
> + (_heap)->heap.data = NULL; \
> } while (0)
>
> -#define heap_pop(h, d, cmp, set_backpointer) \
> -({ \
> - bool _r = (h)->used; \
> - if (_r) { \
> - (d) = (h)->data[0]; \
> - heap_del(h, 0, cmp, set_backpointer); \
> - } \
> - _r; \
> -})
> -
> -#define heap_resort(heap, cmp, set_backpointer) \
> -do { \
> - ssize_t _i; \
> - for (_i = (ssize_t) (heap)->used / 2 - 1; _i >= 0; --_i) \
> - heap_sift_down(heap, _i, cmp, set_backpointer); \
> -} while (0)
>
> #define ANYSINT_MAX(t) \
> ((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
> --
> 2.34.1
>