Message-ID: <50ABB06A.9000402@redhat.com>
Date:	Tue, 20 Nov 2012 11:31:38 -0500
From:	Rik van Riel <riel@...hat.com>
To:	Ingo Molnar <mingo@...nel.org>
CC:	Linus Torvalds <torvalds@...ux-foundation.org>,
	David Rientjes <rientjes@...gle.com>,
	Mel Gorman <mgorman@...e.de>,
	Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
	linux-mm <linux-mm@...ck.org>,
	Peter Zijlstra <a.p.zijlstra@...llo.nl>,
	Paul Turner <pjt@...gle.com>,
	Lee Schermerhorn <Lee.Schermerhorn@...com>,
	Christoph Lameter <cl@...ux.com>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Andrea Arcangeli <aarcange@...hat.com>,
	Thomas Gleixner <tglx@...utronix.de>,
	Johannes Weiner <hannes@...xchg.org>,
	Hugh Dickins <hughd@...gle.com>
Subject: Re: [PATCH, v2] mm, numa: Turn 4K pte NUMA faults into effective
 hugepage ones

On 11/20/2012 11:09 AM, Ingo Molnar wrote:

> Subject: mm, numa: Turn 4K pte NUMA faults into effective hugepage ones
> From: Ingo Molnar <mingo@...nel.org>
> Date: Tue Nov 20 15:48:26 CET 2012
>
> Reduce the 4K page fault count by looking around and processing
> nearby pages if possible.

This is essentially what autonuma does with PMD-level NUMA
faults, so we know this idea works.
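
For anyone new to the thread, here is a toy userspace model of the
fault count arithmetic; this is not kernel or autonuma code, and
PTES_PER_PMD is simply the x86-64 value of 512 4K ptes per 2M pmd:

   #include <stdbool.h>
   #include <stdio.h>

   #define PTES_PER_PMD 512        /* 2M pmd / 4K pages on x86-64 */

   int main(void)
   {
           bool pte_numa[PTES_PER_PMD];
           int i, j, per_page = 0, per_pmd = 0;

           /* change_prot_numa() has marked every pte under the pmd: */
           for (i = 0; i < PTES_PER_PMD; i++)
                   pte_numa[i] = true;

           /* Per-page handling: one exception per marked pte. */
           for (i = 0; i < PTES_PER_PMD; i++)
                   if (pte_numa[i])
                           per_page++;

           /*
            * Fault-around handling: the first exception also clears
            * all the other marked ptes under the pmd, so subsequent
            * accesses to them do not fault at all.
            */
           for (i = 0; i < PTES_PER_PMD; i++) {
                   if (!pte_numa[i])
                           continue;
                   per_pmd++;
                   for (j = 0; j < PTES_PER_PMD; j++)
                           pte_numa[j] = false;
           }

           printf("per-page: %d faults, per-pmd: %d fault\n",
                  per_page, per_pmd);
           return 0;
   }

Whether anything close to that factor of 512 survives contact with a
real workload is exactly what the measurements below should tell us.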

Performance measurements will show us how much of an impact
it makes, since I don't think we have ever done apples-to-apples
comparisons with just this one thing toggled :)

The patch looks good to me, just a nit-pick on the comment
above do_numa_page().

Other than that:

Acked-by: Rik van Riel <riel@...hat.com>

> Index: linux/mm/memory.c
> ===================================================================
> --- linux.orig/mm/memory.c
> +++ linux/mm/memory.c
> @@ -3455,64 +3455,93 @@ static int do_nonlinear_fault(struct mm_
>   	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
>   }
>
> -static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
> +static int __do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
>   			unsigned long address, pte_t *ptep, pmd_t *pmd,
> -			unsigned int flags, pte_t entry)
> +			unsigned int flags, pte_t entry, spinlock_t *ptl)
>   {
> -	struct page *page = NULL;
> -	int node, page_nid = -1;
> -	int last_cpu = -1;
> -	spinlock_t *ptl;
> -
> -	ptl = pte_lockptr(mm, pmd);
> -	spin_lock(ptl);
> -	if (unlikely(!pte_same(*ptep, entry)))
> -		goto out_unlock;
> +	struct page *page;
> +	int new_node;
>
>   	page = vm_normal_page(vma, address, entry);
>   	if (page) {
> -		get_page(page);
> -		page_nid = page_to_nid(page);
> -		last_cpu = page_last_cpu(page);
> -		node = mpol_misplaced(page, vma, address);
> -		if (node != -1 && node != page_nid)
> +		int page_nid = page_to_nid(page);
> +		int last_cpu = page_last_cpu(page);
> +
> +		task_numa_fault(page_nid, last_cpu, 1);
> +
> +		new_node = mpol_misplaced(page, vma, address);
> +		if (new_node != -1 && new_node != page_nid)
>   			goto migrate;
>   	}
>
> -out_pte_upgrade_unlock:
> +out_pte_upgrade:
>   	flush_cache_page(vma, address, pte_pfn(entry));
> -
>   	ptep_modify_prot_start(mm, address, ptep);
>   	entry = pte_modify(entry, vma->vm_page_prot);
> +	if (pte_dirty(entry))
> +		entry = pte_mkwrite(entry);
>   	ptep_modify_prot_commit(mm, address, ptep, entry);
> -
>   	/* No TLB flush needed because we upgraded the PTE */
> -
>   	update_mmu_cache(vma, address, ptep);
> -
> -out_unlock:
> -	pte_unmap_unlock(ptep, ptl);
> -
> -	if (page) {
> -		task_numa_fault(page_nid, last_cpu, 1);
> -		put_page(page);
> -	}
>   out:
>   	return 0;
>
>   migrate:
> +	get_page(page);
>   	pte_unmap_unlock(ptep, ptl);
>
> -	if (migrate_misplaced_page(page, node)) {
> +	migrate_misplaced_page(page, new_node); /* Drops the page reference */
> +
> +	/* Re-check after migration: */
> +
> +	ptl = pte_lockptr(mm, pmd);
> +	spin_lock(ptl);
> +	entry = ACCESS_ONCE(*ptep);
> +
> +	if (!pte_numa(vma, entry))
>   		goto out;
> -	}
> -	page = NULL;
>
> -	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
> -	if (!pte_same(*ptep, entry))
> -		goto out_unlock;
> +	goto out_pte_upgrade;
> +}
> +
> +/*
> + * Add a simple loop to also fetch ptes within the same pmd:
> + */

That's not a very useful comment. How about something like:

   /*
    * Also fault over nearby ptes from within the same pmd and vma,
    * in order to minimize the overhead from page fault exceptions
    * and TLB flushes.
    */
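
(For scale: a 2M pmd spans 2M / 4K = 512 ptes, so one fault handled
this way can stand in for up to 512 individual ones, with the
corresponding saving in exceptions and TLB flushes.)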

> +static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
> +			unsigned long addr0, pte_t *ptep0, pmd_t *pmd,
> +			unsigned int flags, pte_t entry0)
> +{
> +	unsigned long addr0_pmd;
> +	unsigned long addr_start;
> +	unsigned long addr;
> +	spinlock_t *ptl;
> +	pte_t *ptep;
> +
> +	addr0_pmd = addr0 & PMD_MASK;
> +	addr_start = max(addr0_pmd, vma->vm_start);
>
> -	goto out_pte_upgrade_unlock;
> +	ptep = pte_offset_map(pmd, addr_start);
> +	ptl = pte_lockptr(mm, pmd);
> +	spin_lock(ptl);
> +
> +	for (addr = addr_start; addr < vma->vm_end; addr += PAGE_SIZE, ptep++) {
> +		pte_t entry;
> +
> +		entry = ACCESS_ONCE(*ptep);
> +
> +		if ((addr & PMD_MASK) != addr0_pmd)
> +			break;
> +		if (!pte_present(entry))
> +			continue;
> +		if (!pte_numa(vma, entry))
> +			continue;
> +
> +		__do_numa_page(mm, vma, addr, ptep, pmd, flags, entry, ptl);
> +	}
> +
> +	pte_unmap_unlock(ptep, ptl);
> +	
> +	return 0;
>   }
>
>   /*
>


-- 
All rights reversed