Message-ID: <68360241-eb18-b3d8-bf6f-4dbbed258ee6@redhat.com>
Date:   Mon, 9 Mar 2020 09:55:38 +0100
From:   David Hildenbrand <david@...hat.com>
To:     "Huang, Ying" <ying.huang@...el.com>,
        Andrew Morton <akpm@...ux-foundation.org>
Cc:     linux-mm@...ck.org, linux-kernel@...r.kernel.org,
        Dave Hansen <dave.hansen@...ux.intel.com>,
        Mel Gorman <mgorman@...e.de>, Vlastimil Babka <vbabka@...e.cz>,
        Zi Yan <ziy@...dia.com>, Michal Hocko <mhocko@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Minchan Kim <minchan@...nel.org>,
        Johannes Weiner <hannes@...xchg.org>,
        Hugh Dickins <hughd@...gle.com>
Subject: Re: [PATCH -V3] mm: Add PageLazyFree() helper functions for
 MADV_FREE

On 09.03.20 03:17, Huang, Ying wrote:
> From: Huang Ying <ying.huang@...el.com>
> 
> Currently, PageSwapBacked() is used as the helper function to check
> whether pages have been freed lazily via MADV_FREE.  This isn't very
> obvious.  So Dave suggested adding the PageLazyFree() family of helper
> functions to improve code readability.
> 
> Signed-off-by: "Huang, Ying" <ying.huang@...el.com>
> Suggested-by: Dave Hansen <dave.hansen@...ux.intel.com>
> Cc: David Hildenbrand <david@...hat.com>
> Cc: Mel Gorman <mgorman@...e.de>
> Cc: Vlastimil Babka <vbabka@...e.cz>
> Cc: Zi Yan <ziy@...dia.com>
> Cc: Michal Hocko <mhocko@...nel.org>
> Cc: Peter Zijlstra <peterz@...radead.org>
> Cc: Minchan Kim <minchan@...nel.org>
> Cc: Johannes Weiner <hannes@...xchg.org>
> Cc: Hugh Dickins <hughd@...gle.com>
> ---
> Changelog:
> 
> v3:
> 
> - Improved comments.  Thanks, David Hildenbrand!
> 
> - Use the helper function in /proc/PID/smaps lazyfree reporting.
> 
> v2:
> 
> - Avoid code bloat by removing VM_BUG_ON_PAGE(), which doesn't exist
>   in the original code.  Now there is no text/data/bss size change.
> 
> - Fix one wrong replacement in try_to_unmap_one().
> 
> ---
>  fs/proc/task_mmu.c         |  2 +-
>  include/linux/page-flags.h | 25 +++++++++++++++++++++++++
>  mm/rmap.c                  |  4 ++--
>  mm/swap.c                  | 11 +++--------
>  mm/vmscan.c                |  7 +++----
>  5 files changed, 34 insertions(+), 15 deletions(-)
> 
> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> index 3ba9ae83bff5..3458d5711e57 100644
> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -471,7 +471,7 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
>  	 */
>  	if (PageAnon(page)) {
>  		mss->anonymous += size;
> -		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
> +		if (__PageLazyFree(page) && !dirty && !PageDirty(page))
>  			mss->lazyfree += size;
>  	}
>  
> diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
> index 49c2697046b9..bb26f74cbe8e 100644
> --- a/include/linux/page-flags.h
> +++ b/include/linux/page-flags.h
> @@ -498,6 +498,31 @@ static __always_inline int PageKsm(struct page *page)
>  TESTPAGEFLAG_FALSE(Ksm)
>  #endif
>  
> +/*
> + * For pages freed lazily via MADV_FREE.  Lazyfree pages are clean
> + * anonymous pages.  They don't have PG_swapbacked set, to distinguish
> + * them from normal anonymous pages.
> + */
> +static __always_inline int __PageLazyFree(struct page *page)
> +{
> +	return !PageSwapBacked(page);
> +}
> +
> +static __always_inline int PageLazyFree(struct page *page)
> +{
> +	return PageAnon(page) && __PageLazyFree(page);
> +}
> +
> +static __always_inline void SetPageLazyFree(struct page *page)
> +{
> +	ClearPageSwapBacked(page);
> +}
> +
> +static __always_inline void ClearPageLazyFree(struct page *page)
> +{
> +	SetPageSwapBacked(page);
> +}
> +
>  u64 stable_page_flags(struct page *page);
>  
>  static inline int PageUptodate(struct page *page)
> diff --git a/mm/rmap.c b/mm/rmap.c
> index 1c02adaa233e..6ec96c8e7826 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1609,7 +1609,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
>  			}
>  
>  			/* MADV_FREE page check */
> -			if (!PageSwapBacked(page)) {
> +			if (__PageLazyFree(page)) {
>  				if (!PageDirty(page)) {
>  					/* Invalidate as we cleared the pte */
>  					mmu_notifier_invalidate_range(mm,
> @@ -1623,7 +1623,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
>  				 * discarded. Remap the page to page table.
>  				 */
>  				set_pte_at(mm, address, pvmw.pte, pteval);
> -				SetPageSwapBacked(page);
> +				ClearPageLazyFree(page);
>  				ret = false;
>  				page_vma_mapped_walk_done(&pvmw);
>  				break;
> diff --git a/mm/swap.c b/mm/swap.c
> index c1d3ca80ea10..d83f2cd4cdb8 100644
> --- a/mm/swap.c
> +++ b/mm/swap.c
> @@ -563,7 +563,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
>  static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
>  			    void *arg)
>  {
> -	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
> +	if (PageLRU(page) && PageAnon(page) && !__PageLazyFree(page) &&
>  	    !PageSwapCache(page) && !PageUnevictable(page)) {
>  		bool active = PageActive(page);
>  
> @@ -571,12 +571,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
>  				       LRU_INACTIVE_ANON + active);
>  		ClearPageActive(page);
>  		ClearPageReferenced(page);
> -		/*
> -		 * lazyfree pages are clean anonymous pages. They have
> -		 * SwapBacked flag cleared to distinguish normal anonymous
> -		 * pages
> -		 */
> -		ClearPageSwapBacked(page);
> +		SetPageLazyFree(page);
>  		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
>  
>  		__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
> @@ -678,7 +673,7 @@ void deactivate_page(struct page *page)
>   */
>  void mark_page_lazyfree(struct page *page)
>  {
> -	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
> +	if (PageLRU(page) && PageAnon(page) && !__PageLazyFree(page) &&
>  	    !PageSwapCache(page) && !PageUnevictable(page)) {
>  		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
>  
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index eca49a1c2f68..40bb41ada2d2 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1043,8 +1043,7 @@ static void page_check_dirty_writeback(struct page *page,
>  	 * Anonymous pages are not handled by flushers and must be written
>  	 * from reclaim context. Do not stall reclaim based on them
>  	 */
> -	if (!page_is_file_cache(page) ||
> -	    (PageAnon(page) && !PageSwapBacked(page))) {
> +	if (!page_is_file_cache(page) || PageLazyFree(page)) {
>  		*dirty = false;
>  		*writeback = false;
>  		return;
> @@ -1235,7 +1234,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>  		 * Try to allocate it some swap space here.
>  		 * Lazyfree page could be freed directly
>  		 */
> -		if (PageAnon(page) && PageSwapBacked(page)) {
> +		if (PageAnon(page) && !__PageLazyFree(page)) {
>  			if (!PageSwapCache(page)) {
>  				if (!(sc->gfp_mask & __GFP_IO))
>  					goto keep_locked;
> @@ -1411,7 +1410,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
>  			}
>  		}
>  
> -		if (PageAnon(page) && !PageSwapBacked(page)) {
> +		if (PageLazyFree(page)) {
>  			/* follow __remove_mapping for reference */
>  			if (!page_ref_freeze(page, 1))
>  				goto keep_locked;
> 
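
As background for the comments below: the state these helpers encode is what
madvise(MADV_FREE) produces from userspace. The pages stay mapped, have
PG_swapbacked cleared, and may be discarded under memory pressure unless they
are written to again first. A minimal sketch of that usage (illustrative only,
not part of the patch; assumes a kernel and libc exposing MADV_FREE, i.e.
Linux 4.5+ / glibc 2.24+):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 4096;

	/* Anonymous private mapping; its pages start out swap backed. */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memset(buf, 0xaa, len);		/* dirty the pages */

	/*
	 * Declare the contents disposable.  The pages are not freed right
	 * away; they are marked lazy free (the cleared-PG_swapbacked state
	 * the helpers above test for) and may be reclaimed later.
	 */
	if (madvise(buf, len, MADV_FREE))
		perror("madvise(MADV_FREE)");

	/*
	 * Writing again before reclaim cancels the lazy free; that is the
	 * redirty case the try_to_unmap_one() hunk handles by restoring
	 * the swap-backed state.
	 */
	buf[0] = 1;

	munmap(buf, len);
	return 0;
}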

I still prefer something like

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index fd6d4670ccc3..7538501230bd 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -63,6 +63,10 @@
  * page_waitqueue(page) is a wait queue of all tasks waiting for the page
  * to become unlocked.
  *
+ * PG_swapbacked used with anonymous pages (PageAnon()) indicates that a
+ * page is backed by swap. Anonymous pages without PG_swapbacked are
+ * pages that can be lazily freed (e.g., MADV_FREE) on demand.
+ *
  * PG_uptodate tells whether the page's contents is valid.  When a read
  * completes, the page becomes uptodate, unless a disk I/O error happened.
  *

and really don't like the use of !__PageLazyFree() instead of PageSwapBacked().
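
Purely to make the documented semantics concrete, a throwaway userspace model
(plain C; every name below is a stand-in and none of this is kernel code): for
anonymous pages, PG_swapbacked set means "backed by swap", cleared means
"lazily freeable via MADV_FREE".

#include <stdbool.h>
#include <stdio.h>

struct toy_page {
	bool anon;		/* stand-in for PageAnon()    */
	bool swapbacked;	/* stand-in for PG_swapbacked */
};

/* Same bit test as the patch's PageLazyFree(): anonymous, not swap backed. */
static bool toy_lazyfree(const struct toy_page *p)
{
	return p->anon && !p->swapbacked;
}

int main(void)
{
	struct toy_page normal_anon = { .anon = true,  .swapbacked = true  };
	struct toy_page madv_freed  = { .anon = true,  .swapbacked = false };
	struct toy_page file_backed = { .anon = false, .swapbacked = false };

	printf("normal anon lazyfree? %d\n", toy_lazyfree(&normal_anon));	/* 0 */
	printf("MADV_FREE   lazyfree? %d\n", toy_lazyfree(&madv_freed));	/* 1 */
	printf("file page   lazyfree? %d\n", toy_lazyfree(&file_backed));	/* 0 */
	return 0;
}

Whichever spelling the call sites end up with, it is exactly this one bit
(plus PageAnon()) being tested; the disagreement is only about which name
reads better there.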

-- 
Thanks,

David / dhildenb
