Message-ID: <9c22a18d-85e6-9d7c-b90a-b50af6e3a66b@suse.cz>
Date: Thu, 12 Aug 2021 16:07:29 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org, linux-fsdevel@...r.kernel.org,
Christoph Hellwig <hch@....de>, Jan Kara <jack@...e.cz>
Subject: Re: [PATCH v14 065/138] mm/writeback: Change __wb_writeout_inc() to
__wb_writeout_add()
On 7/15/21 5:35 AM, Matthew Wilcox (Oracle) wrote:
> Allow for accounting N pages at once instead of one page at a time.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
> Reviewed-by: Christoph Hellwig <hch@....de>
> Reviewed-by: Jan Kara <jack@...e.cz>
Acked-by: Vlastimil Babka <vbabka@...e.cz>
> ---
> mm/page-writeback.c | 22 +++++++++++-----------
> 1 file changed, 11 insertions(+), 11 deletions(-)
>
> diff --git a/mm/page-writeback.c b/mm/page-writeback.c
> index f55f2ebdd9a9..e542ea37d605 100644
> --- a/mm/page-writeback.c
> +++ b/mm/page-writeback.c
> @@ -562,12 +562,12 @@ static unsigned long wp_next_time(unsigned long cur_time)
>  	return cur_time;
>  }
> 
> -static void wb_domain_writeout_inc(struct wb_domain *dom,
> +static void wb_domain_writeout_add(struct wb_domain *dom,
>  				   struct fprop_local_percpu *completions,
> -				   unsigned int max_prop_frac)
> +				   unsigned int max_prop_frac, long nr)
>  {
>  	__fprop_add_percpu_max(&dom->completions, completions,
> -			       max_prop_frac, 1);
> +			       max_prop_frac, nr);
>  	/* First event after period switching was turned off? */
>  	if (unlikely(!dom->period_time)) {
>  		/*
> @@ -585,18 +585,18 @@ static void wb_domain_writeout_inc(struct wb_domain *dom,
>   * Increment @wb's writeout completion count and the global writeout
>   * completion count. Called from test_clear_page_writeback().
>   */
> -static inline void __wb_writeout_inc(struct bdi_writeback *wb)
> +static inline void __wb_writeout_add(struct bdi_writeback *wb, long nr)
>  {
>  	struct wb_domain *cgdom;
> 
> -	inc_wb_stat(wb, WB_WRITTEN);
> -	wb_domain_writeout_inc(&global_wb_domain, &wb->completions,
> -			       wb->bdi->max_prop_frac);
> +	wb_stat_mod(wb, WB_WRITTEN, nr);
> +	wb_domain_writeout_add(&global_wb_domain, &wb->completions,
> +			       wb->bdi->max_prop_frac, nr);
> 
>  	cgdom = mem_cgroup_wb_domain(wb);
>  	if (cgdom)
> -		wb_domain_writeout_inc(cgdom, wb_memcg_completions(wb),
> -				       wb->bdi->max_prop_frac);
> +		wb_domain_writeout_add(cgdom, wb_memcg_completions(wb),
> +				       wb->bdi->max_prop_frac, nr);
>  }
> 
>  void wb_writeout_inc(struct bdi_writeback *wb)
> @@ -604,7 +604,7 @@ void wb_writeout_inc(struct bdi_writeback *wb)
>  	unsigned long flags;
> 
>  	local_irq_save(flags);
> -	__wb_writeout_inc(wb);
> +	__wb_writeout_add(wb, 1);
>  	local_irq_restore(flags);
>  }
>  EXPORT_SYMBOL_GPL(wb_writeout_inc);
> @@ -2751,7 +2751,7 @@ int test_clear_page_writeback(struct page *page)
>  				struct bdi_writeback *wb = inode_to_wb(inode);
> 
>  				dec_wb_stat(wb, WB_WRITEBACK);
> -				__wb_writeout_inc(wb);
> +				__wb_writeout_add(wb, 1);
>  			}
>  		}
>
>
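The shape of the change is the usual inc-to-add conversion: __wb_writeout_inc() becomes a thin wrapper around __wb_writeout_add(wb, nr), so callers that complete writeback on several pages at once can account them in a single call, per the commit message. Below is a minimal standalone sketch of that pattern only, using made-up demo_* names rather than the kernel API:

/*
 * Minimal userspace sketch (not kernel code) of the inc-to-add pattern:
 * the batched helper takes a count, and the old single-item helper is
 * kept as a wrapper that passes 1. The demo_* names are invented for
 * illustration.
 */
#include <stdio.h>

static long demo_written;	/* stands in for the WB_WRITTEN counter */

/* Account nr completed writeouts in one call. */
static void demo_writeout_add(long nr)
{
	demo_written += nr;
}

/* Old single-page interface, now a thin wrapper around the batched one. */
static void demo_writeout_inc(void)
{
	demo_writeout_add(1);
}

int main(void)
{
	demo_writeout_inc();		/* one page, as before */
	demo_writeout_add(512);		/* many pages accounted at once */
	printf("%ld\n", demo_written);	/* prints 513 */
	return 0;
}

Keeping wb_writeout_inc() as __wb_writeout_add(wb, 1) means existing callers are unchanged, while new callers can pass any nr.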