[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <Znqq7GUFnwFj-SFI@infradead.org>
Date: Tue, 25 Jun 2024 04:33:00 -0700
From: Christoph Hellwig <hch@...radead.org>
To: alexjlzheng@...il.com
Cc: chandan.babu@...cle.com, djwong@...nel.org, linux-xfs@...r.kernel.org,
linux-kernel@...r.kernel.org, alexjlzheng@...cent.com
Subject: Re: [PATCH] xfs: make xfs_log_iovec independent from xfs_log_vec and
release it early
> --- a/fs/xfs/xfs_log.c
> +++ b/fs/xfs/xfs_log.c
> @@ -2526,6 +2526,8 @@ xlog_write(
> xlog_write_full(lv, ticket, iclog, &log_offset,
> &len, &record_cnt, &data_cnt);
> }
> + if (lv->lv_flags & XFS_LOG_VEC_DYNAMIC)
> + kvfree(lv->lv_iovecp);
This should probably be a function parameter to xlog_write, with
xlog_cil_write_chain asking for the iovecs to be freed because they
are dynamically allocated, and the other two not because the iovecs
are on-stack. With that we don't need to grow a new field in
struct xfs_log_vec.
> list_for_each_entry(lip, &tp->t_items, li_trans) {
> struct xfs_log_vec *lv;
> + struct xfs_log_iovec *lvec;
> int niovecs = 0;
> int nbytes = 0;
> int buf_size;
> @@ -339,18 +339,23 @@ xlog_cil_alloc_shadow_bufs(
> * the buffer, only the log vector header and the iovec
> * storage.
> */
> - kvfree(lip->li_lv_shadow);
> - lv = xlog_kvmalloc(buf_size);
> -
> - memset(lv, 0, xlog_cil_iovec_space(niovecs));
> + if (lip->li_lv_shadow) {
> + kvfree(lip->li_lv_shadow->lv_iovecp);
> + kvfree(lip->li_lv_shadow);
> + }
> + lv = xlog_kvmalloc(sizeof(struct xfs_log_vec));
> + memset(lv, 0, sizeof(struct xfs_log_vec));
> + lvec = xlog_kvmalloc(buf_size);
> + memset(lvec, 0, xlog_cil_iovec_space(niovecs));
This area can use quite a bit of a redo. The xfs_log_vec is tiny,
so it doesn't really need a vmalloc fallback but can simply use
kmalloc.
But more importantly there is no need to reallocate it every time, you
just need to allocate it once. So this should probably become:
lv = lip->li_lv_shadow;
if (!lv) {
/* kmalloc and initialize, set lv_size to zero */
}
if (buf_size > lv->lv_size) {
		/* grow case that reallocates ->lv_iovecp */
} else {
/* same or smaller, optimise common overwrite case */
..
}
Powered by blists - more mailing lists