Message-ID: <YPao+syEWXGhDxay@kernel.org>
Date: Tue, 20 Jul 2021 13:44:10 +0300
From: Mike Rapoport <rppt@...nel.org>
To: "Matthew Wilcox (Oracle)" <willy@...radead.org>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
linux-fsdevel@...r.kernel.org, Yu Zhao <yuzhao@...gle.com>,
Christoph Hellwig <hch@....de>,
David Howells <dhowells@...hat.com>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>
Subject: Re: [PATCH v14 011/138] mm/lru: Add folio LRU functions
On Thu, Jul 15, 2021 at 04:34:57AM +0100, Matthew Wilcox (Oracle) wrote:
> Handle arbitrary-order folios being added to the LRU. By definition,
> all pages being added to the LRU were already head or base pages, but
> call page_folio() on them anyway to get the type right and avoid the
> buried calls to compound_head().
>
> Saves 783 bytes of kernel text; no functions grow.
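The saving makes sense: the page-flag macros hide a compound_head()
call in every test. Roughly (a sketch of the expansion, not the exact
kernel macros):

static inline int PageSwapBacked(struct page *page)
{
        /* every page-flag test pays for a head-page lookup */
        return test_bit(PG_swapbacked, &compound_head(page)->flags);
}

static inline bool folio_test_swapbacked(struct folio *folio)
{
        /* a folio is never a tail page, so no lookup is needed */
        return test_bit(PG_swapbacked, &folio->flags);
}

Calling page_folio() once at the entry point does the head lookup a
single time per call instead of once per flag test.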
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
> Reviewed-by: Yu Zhao <yuzhao@...gle.com>
> Reviewed-by: Christoph Hellwig <hch@....de>
> Reviewed-by: David Howells <dhowells@...hat.com>
> Acked-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
> ---
> include/linux/mm_inline.h | 98 ++++++++++++++++++++++------------
> include/trace/events/pagemap.h | 2 +-
> 2 files changed, 65 insertions(+), 35 deletions(-)
Acked-by: Mike Rapoport <rppt@...ux.ibm.com>
> diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
> index 355ea1ee32bd..ee155d19885e 100644
> --- a/include/linux/mm_inline.h
> +++ b/include/linux/mm_inline.h
> @@ -6,22 +6,27 @@
> #include <linux/swap.h>
>
> /**
> - * page_is_file_lru - should the page be on a file LRU or anon LRU?
> - * @page: the page to test
> + * folio_is_file_lru - should the folio be on a file LRU or anon LRU?
> + * @folio: the folio to test
> *
> - * Returns 1 if @page is a regular filesystem backed page cache page or a lazily
> - * freed anonymous page (e.g. via MADV_FREE). Returns 0 if @page is a normal
> - * anonymous page, a tmpfs page or otherwise ram or swap backed page. Used by
> - * functions that manipulate the LRU lists, to sort a page onto the right LRU
> - * list.
> + * Returns 1 if @folio is a regular filesystem backed page cache folio
> + * or a lazily freed anonymous folio (e.g. via MADV_FREE). Returns 0 if
> + * @folio is a normal anonymous folio, a tmpfs folio or otherwise ram or
> + * swap backed folio. Used by functions that manipulate the LRU lists,
> + * to sort a folio onto the right LRU list.
> *
> * We would like to get this info without a page flag, but the state
> - * needs to survive until the page is last deleted from the LRU, which
> + * needs to survive until the folio is last deleted from the LRU, which
> * could be as far down as __page_cache_release.
It seems mm_inline.h is not part of the generated API docs, otherwise
kernel-doc would complain about the missing Return: description.
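Something along these lines would keep it quiet if/when the file gets
pulled into the docs (sketch only):

 * Return: 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE), 0 otherwise.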
> */
> +static inline int folio_is_file_lru(struct folio *folio)
> +{
> + return !folio_test_swapbacked(folio);
> +}
> +
> static inline int page_is_file_lru(struct page *page)
> {
> - return !PageSwapBacked(page);
> + return folio_is_file_lru(page_folio(page));
> }
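The one-line wrapper keeps existing page-based callers compiling while
new code takes the folio path directly. To make the MADV_FREE case from
the comment concrete (illustrative sketch; folio_clear_swapbacked() is
assumed to exist alongside the test helper from earlier in the series):

        /*
         * Lazy freeing clears the swapbacked flag, so an anonymous
         * folio then sorts onto the file LRU despite being anon.
         */
        folio_clear_swapbacked(folio);
        VM_BUG_ON_FOLIO(!folio_is_file_lru(folio), folio);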
>
> static __always_inline void update_lru_size(struct lruvec *lruvec,
> @@ -39,69 +44,94 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
> }
>
> /**
> - * __clear_page_lru_flags - clear page lru flags before releasing a page
> - * @page: the page that was on lru and now has a zero reference
> + * __folio_clear_lru_flags - clear page lru flags before releasing a page
> + * @folio: The folio that was on lru and now has a zero reference
> */
> -static __always_inline void __clear_page_lru_flags(struct page *page)
> +static __always_inline void __folio_clear_lru_flags(struct folio *folio)
> {
> - VM_BUG_ON_PAGE(!PageLRU(page), page);
> + VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);
>
> - __ClearPageLRU(page);
> + __folio_clear_lru(folio);
>
> /* this shouldn't happen, so leave the flags to bad_page() */
> - if (PageActive(page) && PageUnevictable(page))
> + if (folio_test_active(folio) && folio_test_unevictable(folio))
> return;
>
> - __ClearPageActive(page);
> - __ClearPageUnevictable(page);
> + __folio_clear_active(folio);
> + __folio_clear_unevictable(folio);
> +}
> +
> +static __always_inline void __clear_page_lru_flags(struct page *page)
> +{
> + __folio_clear_lru_flags(page_folio(page));
> }
>
> /**
> - * page_lru - which LRU list should a page be on?
> - * @page: the page to test
> + * folio_lru_list - which LRU list should a folio be on?
> + * @folio: the folio to test
> *
> - * Returns the LRU list a page should be on, as an index
> + * Returns the LRU list a folio should be on, as an index
^ Return:
> * into the array of LRU lists.
> */
> -static __always_inline enum lru_list page_lru(struct page *page)
> +static __always_inline enum lru_list folio_lru_list(struct folio *folio)
> {
> enum lru_list lru;
>
> - VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
> + VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);
>
> - if (PageUnevictable(page))
> + if (folio_test_unevictable(folio))
> return LRU_UNEVICTABLE;
>
> - lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
> - if (PageActive(page))
> + lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
> + if (folio_test_active(folio))
> lru += LRU_ACTIVE;
>
> return lru;
> }
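For reference, this is roughly how the returned index gets consumed by
the list helpers (a sketch; the lruvec_add_folio() name is assumed
here, not quoted from this patch):

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
        enum lru_list lru = folio_lru_list(folio);

        /* account the folio on its list, then link it in */
        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        folio_nr_pages(folio));
        list_add(&folio->lru, &lruvec->lists[lru]);
}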
. . .