lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Tue, 14 Jun 2016 09:07:02 +0200
From:	Michal Hocko <mhocko@...nel.org>
To:	Ebru Akagunduz <ebru.akagunduz@...il.com>
Cc:	linux-mm@...ck.org, hughd@...gle.com, riel@...hat.com,
	akpm@...ux-foundation.org, kirill.shutemov@...ux.intel.com,
	n-horiguchi@...jp.nec.com, aarcange@...hat.com,
	iamjoonsoo.kim@....com, gorcunov@...nvz.org,
	linux-kernel@...r.kernel.org, mgorman@...e.de, rientjes@...gle.com,
	vbabka@...e.cz, aneesh.kumar@...ux.vnet.ibm.com,
	hannes@...xchg.org, boaz@...xistor.com
Subject: Re: [RFC PATCH 1/3] mm, thp: revert allocstall comparing

On Sat 11-06-16 22:15:59, Ebru Akagunduz wrote:
> This patch reverts the allocstall comparison used when deciding
> whether swapin is worthwhile, because that check does not work
> when vm event counters are disabled.
> 
> Related commit:
> http://git.kernel.org/cgit/linux/kernel/git/next/linux-next.git/commit/?id=2548306628308aa6a326640d345a737bc898941d

I guess it would be easier to simply drop
mm-thp-avoid-unnecessary-swapin-in-khugepaged.patch

> Signed-off-by: Ebru Akagunduz <ebru.akagunduz@...il.com>
> ---
>  mm/khugepaged.c | 31 ++++++++-----------------------
>  1 file changed, 8 insertions(+), 23 deletions(-)
> 
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 0ac63f7..e3d8da7 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -68,7 +68,6 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
>   */
>  static unsigned int khugepaged_max_ptes_none __read_mostly;
>  static unsigned int khugepaged_max_ptes_swap __read_mostly;
> -static unsigned long allocstall;
>  
>  static int khugepaged(void *none);
>  
> @@ -926,7 +925,6 @@ static void collapse_huge_page(struct mm_struct *mm,
>  	struct page *new_page;
>  	spinlock_t *pmd_ptl, *pte_ptl;
>  	int isolated = 0, result = 0;
> -	unsigned long swap, curr_allocstall;
>  	struct mem_cgroup *memcg;
>  	unsigned long mmun_start;	/* For mmu_notifiers */
>  	unsigned long mmun_end;		/* For mmu_notifiers */
> @@ -955,8 +953,6 @@ static void collapse_huge_page(struct mm_struct *mm,
>  		goto out_nolock;
>  	}
>  
> -	swap = get_mm_counter(mm, MM_SWAPENTS);
> -	curr_allocstall = sum_vm_event(ALLOCSTALL);
>  	down_read(&mm->mmap_sem);
>  	result = hugepage_vma_revalidate(mm, address);
>  	if (result) {
> @@ -972,22 +968,15 @@ static void collapse_huge_page(struct mm_struct *mm,
>  		up_read(&mm->mmap_sem);
>  		goto out_nolock;
>  	}
> -
>  	/*
> -	 * Don't perform swapin readahead when the system is under pressure,
> -	 * to avoid unnecessary resource consumption.
> +	 * __collapse_huge_page_swapin always returns with mmap_sem
> +	 * locked.  If it fails, release mmap_sem and jump directly
> +	 * out.  Continuing to collapse causes inconsistency.
>  	 */
> -	if (allocstall == curr_allocstall && swap != 0) {
> -		/*
> -		 * __collapse_huge_page_swapin always returns with mmap_sem
> -		 * locked.  If it fails, release mmap_sem and jump directly
> -		 * out.  Continuing to collapse causes inconsistency.
> -		 */
> -		if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) {
> -			mem_cgroup_cancel_charge(new_page, memcg, true);
> -			up_read(&mm->mmap_sem);
> -			goto out_nolock;
> -		}
> +	if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) {
> +		mem_cgroup_cancel_charge(new_page, memcg, true);
> +		up_read(&mm->mmap_sem);
> +		goto out_nolock;
>  	}
>  
>  	up_read(&mm->mmap_sem);
> @@ -1822,7 +1811,6 @@ static void khugepaged_wait_work(void)
>  		if (!scan_sleep_jiffies)
>  			return;
>  
> -		allocstall = sum_vm_event(ALLOCSTALL);
>  		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
>  		wait_event_freezable_timeout(khugepaged_wait,
>  					     khugepaged_should_wakeup(),
> @@ -1830,10 +1818,8 @@ static void khugepaged_wait_work(void)
>  		return;
>  	}
>  
> -	if (khugepaged_enabled()) {
> -		allocstall = sum_vm_event(ALLOCSTALL);
> +	if (khugepaged_enabled())
>  		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
> -	}
>  }
>  
>  static int khugepaged(void *none)
> @@ -1842,7 +1828,6 @@ static int khugepaged(void *none)
>  
>  	set_freezable();
>  	set_user_nice(current, MAX_NICE);
> -	allocstall = sum_vm_event(ALLOCSTALL);
>  
>  	while (!kthread_should_stop()) {
>  		khugepaged_do_scan();
> -- 
> 1.9.1

-- 
Michal Hocko
SUSE Labs

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ