Date:   Tue, 17 Oct 2023 11:48:26 +0800
From:   Kefeng Wang <wangkefeng.wang@...wei.com>
To:     "Vishal Moola (Oracle)" <vishal.moola@...il.com>,
        <linux-mm@...ck.org>
CC:     <linux-kernel@...r.kernel.org>, <akpm@...ux-foundation.org>,
        <shy828301@...il.com>
Subject: Re: [PATCH 4/5] mm/khugepaged: Convert alloc_charge_hpage() to use
 folios



On 2023/10/17 4:05, Vishal Moola (Oracle) wrote:
> Also convert hpage_collapse_alloc_page() to
> hpage_collapse_alloc_folio().
> 
> This removes 1 call to compound_head() and helps convert khugepaged to
> use folios throughout.
> 
> Signed-off-by: Vishal Moola (Oracle) <vishal.moola@...il.com>
> ---
>   mm/khugepaged.c | 13 +++++++------
>   1 file changed, 7 insertions(+), 6 deletions(-)
> 
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index fa21a53ce0c0..70bba8ddea13 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -886,16 +886,16 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
>   }
>   #endif
>   
> -static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
> +static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
>   				      nodemask_t *nmask)
>   {
> -	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
> -	if (unlikely(!*hpage)) {
> +	*folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
> +
> +	if (unlikely(!*folio)) {
>   		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
>   		return false;
>   	}
>   
> -	folio_prep_large_rmappable((struct folio *)*hpage);
>   	count_vm_event(THP_COLLAPSE_ALLOC);
>   	return true;
>   }
> @@ -1062,15 +1062,16 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
>   	int node = hpage_collapse_find_target_node(cc);
>   	struct folio *folio;
>   
> -	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
> +	if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask))
>   		return SCAN_ALLOC_HUGE_PAGE_FAIL;
>   
> -	folio = page_folio(*hpage);
>   	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
>   		folio_put(folio);
>   		*hpage = NULL;
>   		return SCAN_CGROUP_CHARGE_FAIL;
>   	}
> +
> +	*hpage = folio_page(folio, 0);
>   	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);

Could this switch to count_memcg_folio_events(), and kill count_memcg_page_event() if this is its last user? (A sketch is below, after the quoted patch.)
>   
>   	return SCAN_SUCCEED;
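
A minimal sketch of that conversion, on top of this patch (assuming count_memcg_folio_events() keeps its current (folio, event, nr) signature from include/linux/memcontrol.h; untested and illustrative only, not a proposed hunk):

	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
		folio_put(folio);
		*hpage = NULL;
		return SCAN_CGROUP_CHARGE_FAIL;
	}

	*hpage = folio_page(folio, 0);
	/* count against the folio directly instead of going back through the page */
	count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);

	return SCAN_SUCCEED;

If that really is the last caller, count_memcg_page_event() could then be removed from memcontrol.h in a follow-up.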

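As an aside on the commit message: the compound_head() call this removes is the one buried inside page_folio(). Condensed from the hunks above (illustrative before/after, not a real hunk):

	/* before: allocate a page, then derive its folio;
	 * page_folio() resolves the head page via compound_head() */
	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
	folio = page_folio(*hpage);

	/* after: allocate the folio directly, so no head lookup is needed */
	*folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
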