Message-ID: <f9d112a9-d318-05d0-8518-1e7964e49fea@suse.cz>
Date: Thu, 12 Aug 2021 16:08:06 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org, linux-fsdevel@...r.kernel.org,
Christoph Hellwig <hch@....de>
Subject: Re: [PATCH v14 066/138] mm/writeback: Add __folio_end_writeback()
On 7/15/21 5:35 AM, Matthew Wilcox (Oracle) wrote:
> test_clear_page_writeback() is actually an mm-internal function, although
> it's named as if it's a pagecache function. Move it to mm/internal.h,
> rename it to __folio_end_writeback() and change the return type to bool.
>
> The conversion from page to folio is mostly about accounting the number
> of pages being written back, although it does eliminate a couple of
> calls to compound_head().
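
The batched accounting is the interesting part here. For anyone skimming, a
rough, self-contained userspace sketch of that pattern (demo_folio and the
plain counters below are invented purely for illustration; only the "adjust
everything by nr = folio_nr_pages() in one call, guarded by the test-and-clear
result" shape mirrors the patch):

/* Standalone model of the batched accounting in __folio_end_writeback():
 * the writeback counters move by the folio's page count in a single call
 * rather than one base page at a time.  Illustrative only, not kernel code.
 */
#include <stdio.h>

struct demo_folio {
	long nr_pages;		/* stands in for folio_nr_pages() */
	int  writeback;		/* stands in for PG_writeback */
};

static long nr_writeback;	/* stands in for the NR_WRITEBACK counter */
static long nr_written;		/* stands in for the NR_WRITTEN counter */

/* Clear writeback and adjust the counters once for the whole folio. */
static int demo_folio_end_writeback(struct demo_folio *folio)
{
	long nr = folio->nr_pages;
	int was_writeback = folio->writeback;

	folio->writeback = 0;
	if (was_writeback) {
		nr_writeback -= nr;	/* cf. lruvec_stat_mod_folio(..., -nr) */
		nr_written += nr;	/* cf. node_stat_mod_folio(..., nr) */
	}
	return was_writeback;
}

int main(void)
{
	struct demo_folio folio = { .nr_pages = 512, .writeback = 1 };

	nr_writeback = folio.nr_pages;
	demo_folio_end_writeback(&folio);
	printf("NR_WRITEBACK=%ld NR_WRITTEN=%ld\n", nr_writeback, nr_written);
	return 0;
}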
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
> Reviewed-by: Christoph Hellwig <hch@....de>
Acked-by: Vlastimil Babka <vbabka@...e.cz>
> ---
>  include/linux/page-flags.h |  1 -
>  mm/filemap.c               |  2 +-
>  mm/internal.h              |  1 +
>  mm/page-writeback.c        | 29 +++++++++++++++--------------
>  4 files changed, 17 insertions(+), 16 deletions(-)
>
> diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
> index ddb660688086..6f9d1f26b1ef 100644
> --- a/include/linux/page-flags.h
> +++ b/include/linux/page-flags.h
> @@ -655,7 +655,6 @@ static __always_inline void SetPageUptodate(struct page *page)
>  
>  CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
>  
> -int test_clear_page_writeback(struct page *page);
>  int __test_set_page_writeback(struct page *page, bool keep_write);
>  
>  #define test_set_page_writeback(page)			\
> diff --git a/mm/filemap.c b/mm/filemap.c
> index 5c4e3185ecb3..a74c69a938ab 100644
> --- a/mm/filemap.c
> +++ b/mm/filemap.c
> @@ -1535,7 +1535,7 @@ void folio_end_writeback(struct folio *folio)
>  	 * reused before the folio_wake().
>  	 */
>  	folio_get(folio);
> -	if (!test_clear_page_writeback(&folio->page))
> +	if (!__folio_end_writeback(folio))
>  		BUG();
>  
>  	smp_mb__after_atomic();
> diff --git a/mm/internal.h b/mm/internal.h
> index fa31a7f0ed79..08e8a28994d1 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -43,6 +43,7 @@ static inline void *folio_raw_mapping(struct folio *folio)
>  
>  vm_fault_t do_swap_page(struct vm_fault *vmf);
>  void folio_rotate_reclaimable(struct folio *folio);
> +bool __folio_end_writeback(struct folio *folio);
>  
>  void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
>  		unsigned long floor, unsigned long ceiling);
> diff --git a/mm/page-writeback.c b/mm/page-writeback.c
> index e542ea37d605..8d5d7921b157 100644
> --- a/mm/page-writeback.c
> +++ b/mm/page-writeback.c
> @@ -583,7 +583,7 @@ static void wb_domain_writeout_add(struct wb_domain *dom,
>  
>  /*
>   * Increment @wb's writeout completion count and the global writeout
> - * completion count. Called from test_clear_page_writeback().
> + * completion count. Called from __folio_end_writeback().
>   */
>  static inline void __wb_writeout_add(struct bdi_writeback *wb, long nr)
>  {
> @@ -2731,27 +2731,28 @@ int clear_page_dirty_for_io(struct page *page)
>  }
>  EXPORT_SYMBOL(clear_page_dirty_for_io);
>  
> -int test_clear_page_writeback(struct page *page)
> +bool __folio_end_writeback(struct folio *folio)
>  {
> -	struct address_space *mapping = page_mapping(page);
> -	int ret;
> +	long nr = folio_nr_pages(folio);
> +	struct address_space *mapping = folio_mapping(folio);
> +	bool ret;
>  
> -	lock_page_memcg(page);
> +	folio_memcg_lock(folio);
>  	if (mapping && mapping_use_writeback_tags(mapping)) {
>  		struct inode *inode = mapping->host;
>  		struct backing_dev_info *bdi = inode_to_bdi(inode);
>  		unsigned long flags;
>  
>  		xa_lock_irqsave(&mapping->i_pages, flags);
> -		ret = TestClearPageWriteback(page);
> +		ret = folio_test_clear_writeback(folio);
>  		if (ret) {
> -			__xa_clear_mark(&mapping->i_pages, page_index(page),
> +			__xa_clear_mark(&mapping->i_pages, folio_index(folio),
>  					PAGECACHE_TAG_WRITEBACK);
>  			if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
>  				struct bdi_writeback *wb = inode_to_wb(inode);
>  
> -				dec_wb_stat(wb, WB_WRITEBACK);
> -				__wb_writeout_add(wb, 1);
> +				wb_stat_mod(wb, WB_WRITEBACK, -nr);
> +				__wb_writeout_add(wb, nr);
>  			}
>  		}
>  
> @@ -2761,14 +2762,14 @@ int test_clear_page_writeback(struct page *page)
>  
>  		xa_unlock_irqrestore(&mapping->i_pages, flags);
>  	} else {
> -		ret = TestClearPageWriteback(page);
> +		ret = folio_test_clear_writeback(folio);
>  	}
>  	if (ret) {
> -		dec_lruvec_page_state(page, NR_WRITEBACK);
> -		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
> -		inc_node_page_state(page, NR_WRITTEN);
> +		lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
> +		zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
> +		node_stat_mod_folio(folio, NR_WRITTEN, nr);
>  	}
> -	unlock_page_memcg(page);
> +	folio_memcg_unlock(folio);
>  	return ret;
>  }
>  
>