Message-ID: <081dc7bb-ae60-4a38-b9c8-560280cf5cf8@linux.alibaba.com>
Date: Tue, 12 Mar 2024 15:27:12 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: Zi Yan <ziy@...dia.com>, linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
 "Matthew Wilcox (Oracle)" <willy@...radead.org>,
 Yang Shi <shy828301@...il.com>, Huang Ying <ying.huang@...el.com>,
 "Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
 Ryan Roberts <ryan.roberts@....com>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2] mm/migrate: put dest folio on deferred split list if
 source was there.



On 2024/3/12 03:58, Zi Yan wrote:
> From: Zi Yan <ziy@...dia.com>
> 
> Commit 616b8371539a6 ("mm: thp: enable thp migration in generic path")
> did not check whether a THP is on the deferred split list before
> migration, so the destination THP is never put on the deferred split
> list even if the source THP was. The opportunity to reclaim free pages
> in a partially mapped THP during deferred list scanning is lost, but
> there is no other harmful consequence[1]. Check the source folio's
> deferred split list status before the page is unmapped, and add the
> destination folio to the list after migration if the source was on it.
> 
> [1]: https://lore.kernel.org/linux-mm/03CE3A00-917C-48CC-8E1C-6A98713C817C@nvidia.com/
> 
> From v1:
> 1. Used dst to get correct deferred split list after migration
>     (per Ryan Roberts).
> 
> Fixes: 616b8371539a ("mm: thp: enable thp migration in generic path")
> Signed-off-by: Zi Yan <ziy@...dia.com>
> ---
>   mm/huge_memory.c | 22 ----------------------
>   mm/internal.h    | 23 +++++++++++++++++++++++
>   mm/migrate.c     | 26 +++++++++++++++++++++++++-
>   3 files changed, 48 insertions(+), 23 deletions(-)
> 
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 9859aa4f7553..c6d4d0cdf4b3 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -766,28 +766,6 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
>   	return pmd;
>   }
>   
> -#ifdef CONFIG_MEMCG
> -static inline
> -struct deferred_split *get_deferred_split_queue(struct folio *folio)
> -{
> -	struct mem_cgroup *memcg = folio_memcg(folio);
> -	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
> -
> -	if (memcg)
> -		return &memcg->deferred_split_queue;
> -	else
> -		return &pgdat->deferred_split_queue;
> -}
> -#else
> -static inline
> -struct deferred_split *get_deferred_split_queue(struct folio *folio)
> -{
> -	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
> -
> -	return &pgdat->deferred_split_queue;
> -}
> -#endif
> -
>   void folio_prep_large_rmappable(struct folio *folio)
>   {
>   	if (!folio || !folio_test_large(folio))
> diff --git a/mm/internal.h b/mm/internal.h
> index d1c69119b24f..8fa36e84463a 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -1107,6 +1107,29 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
>   				   unsigned long addr, pmd_t *pmd,
>   				   unsigned int flags);
>   
> +#ifdef CONFIG_MEMCG
> +static inline
> +struct deferred_split *get_deferred_split_queue(struct folio *folio)
> +{
> +	struct mem_cgroup *memcg = folio_memcg(folio);
> +	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
> +
> +	if (memcg)
> +		return &memcg->deferred_split_queue;
> +	else
> +		return &pgdat->deferred_split_queue;
> +}
> +#else
> +static inline
> +struct deferred_split *get_deferred_split_queue(struct folio *folio)
> +{
> +	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
> +
> +	return &pgdat->deferred_split_queue;
> +}
> +#endif
> +
> +
>   /*
>    * mm/mmap.c
>    */
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 73a052a382f1..591e65658535 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -20,6 +20,7 @@
>   #include <linux/pagemap.h>
>   #include <linux/buffer_head.h>
>   #include <linux/mm_inline.h>
> +#include <linux/mmzone.h>
>   #include <linux/nsproxy.h>
>   #include <linux/ksm.h>
>   #include <linux/rmap.h>
> @@ -1037,7 +1038,10 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
>   enum {
>   	PAGE_WAS_MAPPED = BIT(0),
>   	PAGE_WAS_MLOCKED = BIT(1),
> -	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
> +	PAGE_WAS_ON_DEFERRED_LIST = BIT(2),
> +	PAGE_OLD_STATES = PAGE_WAS_MAPPED |
> +			  PAGE_WAS_MLOCKED |
> +			  PAGE_WAS_ON_DEFERRED_LIST,
>   };
>   
>   static void __migrate_folio_record(struct folio *dst,
> @@ -1168,6 +1172,17 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
>   		folio_lock(src);
>   	}
>   	locked = true;
> +	if (folio_test_large_rmappable(src) &&

IMO, you should check folio_test_large() before calling 
folio_test_large_rmappable(), since the PG_large_rmappable flag is 
stored in the first tail page.
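
Something like this, i.e. gate both the flag test and the list check
behind folio_test_large(); just a sketch of the suggested ordering, with
the body unchanged from your patch:

	if (folio_test_large(src) && folio_test_large_rmappable(src) &&
	    !list_empty(&src->_deferred_list)) {
		struct deferred_split *ds_queue = get_deferred_split_queue(src);

		/* Only reached for a large folio, so the tail-page flag
		 * and _deferred_list accesses above are safe. */
		spin_lock(&ds_queue->split_queue_lock);
		ds_queue->split_queue_len--;
		list_del_init(&src->_deferred_list);
		spin_unlock(&ds_queue->split_queue_lock);
		old_page_state |= PAGE_WAS_ON_DEFERRED_LIST;
	}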

> +		!list_empty(&src->_deferred_list)) {
> +		struct deferred_split *ds_queue = get_deferred_split_queue(src);
> +
> +		spin_lock(&ds_queue->split_queue_lock);
> +		ds_queue->split_queue_len--;
> +		list_del_init(&src->_deferred_list);
> +		spin_unlock(&ds_queue->split_queue_lock);
> +		old_page_state |= PAGE_WAS_ON_DEFERRED_LIST;
> +	}
> +
>   	if (folio_test_mlocked(src))
>   		old_page_state |= PAGE_WAS_MLOCKED;
>   
> @@ -1307,6 +1322,15 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
>   	if (old_page_state & PAGE_WAS_MAPPED)
>   		remove_migration_ptes(src, dst, false);
>   
> +	if (old_page_state & PAGE_WAS_ON_DEFERRED_LIST) {
> +		struct deferred_split *ds_queue = get_deferred_split_queue(dst);
> +
> +		spin_lock(&ds_queue->split_queue_lock);
> +		ds_queue->split_queue_len++;
> +		list_add(&dst->_deferred_list, &ds_queue->split_queue);
> +		spin_unlock(&ds_queue->split_queue_lock);
> +	}
> +
>   out_unlock_both:
>   	folio_unlock(dst);
>   	set_page_owner_migrate_reason(&dst->page, reason);
