Date:   Wed, 15 Apr 2020 13:31:33 -0700
From:   Yang Shi <yang.shi@...ux.alibaba.com>
To:     "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
        akpm@...ux-foundation.org, Andrea Arcangeli <aarcange@...hat.com>
Cc:     Zi Yan <ziy@...dia.com>, Ralph Campbell <rcampbell@...dia.com>,
        John Hubbard <jhubbard@...dia.com>,
        William Kucharski <william.kucharski@...cle.com>,
        linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCHv3, RESEND 2/8] khugepaged: Do not stop collapse if less
 than half PTEs are referenced



On 4/13/20 5:52 AM, Kirill A. Shutemov wrote:
> __collapse_huge_page_swapin() checks the number of referenced PTEs to
> decide if the memory range is hot enough to justify swapin.
>
> We have a few problems with the approach:
>
>   - It is way too late: we can do the check much earlier and save time.
>     khugepaged_scan_pmd() already knows if we have any pages to swap in
>     and the number of referenced pages.
>
>   - It stops the collapse altogether if there are not enough referenced
>     pages, not only the swap-in.
>
> Fix it by making the right check early. We can also avoid additional
> page table scanning if khugepaged_scan_pmd() hasn't found any swap
> entries.
>
> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
> Fixes: 0db501f7a34c ("mm, thp: convert from optimistic swapin collapsing to conservative")
> ---
>   mm/khugepaged.c | 25 ++++++++++---------------
>   1 file changed, 10 insertions(+), 15 deletions(-)

Acked-by: Yang Shi <yang.shi@...ux.alibaba.com>

Just a nit below.

>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 99bab7e4d05b..5968ec5ddd6b 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -902,11 +902,6 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
>   		.pgoff = linear_page_index(vma, address),
>   	};
>   
> -	/* we only decide to swapin, if there is enough young ptes */
> -	if (referenced < HPAGE_PMD_NR/2) {
> -		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
> -		return false;
> -	}
>   	vmf.pte = pte_offset_map(pmd, address);
>   	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
>   			vmf.pte++, vmf.address += PAGE_SIZE) {
> @@ -946,7 +941,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
>   static void collapse_huge_page(struct mm_struct *mm,
>   				   unsigned long address,
>   				   struct page **hpage,
> -				   int node, int referenced)
> +				   int node, int referenced, int unmapped)
>   {
>   	pmd_t *pmd, _pmd;
>   	pte_t *pte;
> @@ -1003,7 +998,8 @@ static void collapse_huge_page(struct mm_struct *mm,
>   	 * If it fails, we release mmap_sem and jump out_nolock.
>   	 * Continuing to collapse causes inconsistency.
>   	 */
> -	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
> +	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
> +				pmd, referenced)) {
>   		mem_cgroup_cancel_charge(new_page, memcg, true);
>   		up_read(&mm->mmap_sem);
>   		goto out_nolock;
> @@ -1214,22 +1210,21 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
>   		    mmu_notifier_test_young(vma->vm_mm, address))
>   			referenced++;
>   	}
> -	if (writable) {
> -		if (referenced) {
> +	if (!writable) {
> +		result = SCAN_PAGE_RO;
> +	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
> +		result = SCAN_LACK_REFERENCED_PAGE;
> +	} else {
>   			result = SCAN_SUCCEED;
>   			ret = 1;

Shall we fix the indentation of the above two statements?
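
Something like this, I mean (just to illustrate the indentation, not
compile-tested):

	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		ret = 1;
	}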

> -		} else {
> -			result = SCAN_LACK_REFERENCED_PAGE;
> -		}
> -	} else {
> -		result = SCAN_PAGE_RO;
>   	}
>   out_unmap:
>   	pte_unmap_unlock(pte, ptl);
>   	if (ret) {
>   		node = khugepaged_find_target_node();
>   		/* collapse_huge_page will return with the mmap_sem released */
> -		collapse_huge_page(mm, address, hpage, node, referenced);
> +		collapse_huge_page(mm, address, hpage, node,
> +				referenced, unmapped);
>   	}
>   out:
>   	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
