Message-ID: <e418faa0-49bf-1bc6-8f77-2849c1b6ae70@arm.com>
Date:   Fri, 9 Aug 2019 09:57:17 +0100
From:   Steven Price <steven.price@....com>
To:     Christoph Hellwig <hch@....de>,
        Linus Torvalds <torvalds@...ux-foundation.org>,
        Andrew Morton <akpm@...ux-foundation.org>
Cc:     Thomas Hellström <thomas@...pmail.org>,
        Jerome Glisse <jglisse@...hat.com>,
        Jason Gunthorpe <jgg@...lanox.com>, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2/3] pagewalk: seperate function pointers from iterator
 data

Subject: s/seperate/separate/

On 08/08/2019 16:42, Christoph Hellwig wrote:
> The mm_walk structure currently mixed data and code.  Split out the
> operations vectors into a new mm_walk_ops structure, and while we
> are changing the API also declare the mm_walk structure inside the
> walk_page_range and walk_page_vma functions.
> 
> Based on patch from Linus Torvalds.
> 
> Signed-off-by: Christoph Hellwig <hch@....de>
> ---
>  arch/openrisc/kernel/dma.c              |  22 +++--
>  arch/powerpc/mm/book3s64/subpage_prot.c |  10 +-
>  arch/s390/mm/gmap.c                     |  33 +++----
>  fs/proc/task_mmu.c                      |  74 +++++++--------
>  include/linux/pagewalk.h                |  64 +++++++------
>  mm/hmm.c                                |  40 +++-----
>  mm/madvise.c                            |  41 +++-----
>  mm/memcontrol.c                         |  23 +++--
>  mm/mempolicy.c                          |  15 ++-
>  mm/migrate.c                            |  15 ++-
>  mm/mincore.c                            |  15 ++-
>  mm/mprotect.c                           |  24 ++---
>  mm/pagewalk.c                           | 118 ++++++++++++++----------
>  13 files changed, 245 insertions(+), 249 deletions(-)
> 

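For anyone reading along without the full series applied, the converted
calling convention comes out roughly like this (a sketch only, pieced
together from the kerneldoc below rather than the header, and
my_state/my_pte_entry/my_ops are made-up names):

	static int my_pte_entry(pte_t *pte, unsigned long addr,
				unsigned long next, struct mm_walk *walk)
	{
		struct my_state *state = walk->private;	/* caller data */

		/* ... inspect *pte covering [addr, next) ... */
		return 0;	/* non-zero stops the walk */
	}

	static const struct mm_walk_ops my_ops = {
		.pte_entry	= my_pte_entry,
	};

	/* ops are const and shareable; mutable state goes in @private */
	down_read(&mm->mmap_sem);
	err = walk_page_range(mm, start, end, &my_ops, &state);
	up_read(&mm->mmap_sem);
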
[...]
> diff --git a/mm/pagewalk.c b/mm/pagewalk.c
> index 8a92a961a2ee..28510fc0dde1 100644
> --- a/mm/pagewalk.c
> +++ b/mm/pagewalk.c
> @@ -9,10 +9,11 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
>  {
>  	pte_t *pte;
>  	int err = 0;
> +	const struct mm_walk_ops *ops = walk->ops;
>  
>  	pte = pte_offset_map(pmd, addr);
>  	for (;;) {
> -		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
> +		err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
>  		if (err)
>  		       break;
>  		addr += PAGE_SIZE;
> @@ -30,6 +31,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
>  {
>  	pmd_t *pmd;
>  	unsigned long next;
> +	const struct mm_walk_ops *ops = walk->ops;
>  	int err = 0;
>  
>  	pmd = pmd_offset(pud, addr);
> @@ -37,8 +39,8 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
>  again:
>  		next = pmd_addr_end(addr, end);
>  		if (pmd_none(*pmd) || !walk->vma) {
> -			if (walk->pte_hole)
> -				err = walk->pte_hole(addr, next, walk);
> +			if (ops->pte_hole)
> +				err = ops->pte_hole(addr, next, walk);
>  			if (err)
>  				break;
>  			continue;
> @@ -47,8 +49,8 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
>  		 * This implies that each ->pmd_entry() handler
>  		 * needs to know about pmd_trans_huge() pmds
>  		 */
> -		if (walk->pmd_entry)
> -			err = walk->pmd_entry(pmd, addr, next, walk);
> +		if (ops->pmd_entry)
> +			err = ops->pmd_entry(pmd, addr, next, walk);
>  		if (err)
>  			break;
>  
> @@ -56,7 +58,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
>  		 * Check this here so we only break down trans_huge
>  		 * pages when we _need_ to
>  		 */
> -		if (!walk->pte_entry)
> +		if (!ops->pte_entry)
>  			continue;
>  
>  		split_huge_pmd(walk->vma, pmd, addr);
> @@ -75,6 +77,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
>  {
>  	pud_t *pud;
>  	unsigned long next;
> +	const struct mm_walk_ops *ops = walk->ops;
>  	int err = 0;
>  
>  	pud = pud_offset(p4d, addr);
> @@ -82,18 +85,18 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
>   again:
>  		next = pud_addr_end(addr, end);
>  		if (pud_none(*pud) || !walk->vma) {
> -			if (walk->pte_hole)
> -				err = walk->pte_hole(addr, next, walk);
> +			if (ops->pte_hole)
> +				err = ops->pte_hole(addr, next, walk);
>  			if (err)
>  				break;
>  			continue;
>  		}
>  
> -		if (walk->pud_entry) {
> +		if (ops->pud_entry) {
>  			spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);
>  
>  			if (ptl) {
> -				err = walk->pud_entry(pud, addr, next, walk);
> +				err = ops->pud_entry(pud, addr, next, walk);
>  				spin_unlock(ptl);
>  				if (err)
>  					break;
> @@ -105,7 +108,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
>  		if (pud_none(*pud))
>  			goto again;
>  
> -		if (walk->pmd_entry || walk->pte_entry)
> +		if (ops->pmd_entry || ops->pte_entry)
>  			err = walk_pmd_range(pud, addr, next, walk);
>  		if (err)
>  			break;
> @@ -119,19 +122,20 @@ static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
>  {
>  	p4d_t *p4d;
>  	unsigned long next;
> +	const struct mm_walk_ops *ops = walk->ops;
>  	int err = 0;
>  
>  	p4d = p4d_offset(pgd, addr);
>  	do {
>  		next = p4d_addr_end(addr, end);
>  		if (p4d_none_or_clear_bad(p4d)) {
> -			if (walk->pte_hole)
> -				err = walk->pte_hole(addr, next, walk);
> +			if (ops->pte_hole)
> +				err = ops->pte_hole(addr, next, walk);
>  			if (err)
>  				break;
>  			continue;
>  		}
> -		if (walk->pmd_entry || walk->pte_entry)
> +		if (ops->pmd_entry || ops->pte_entry)
>  			err = walk_pud_range(p4d, addr, next, walk);
>  		if (err)
>  			break;
> @@ -145,19 +149,20 @@ static int walk_pgd_range(unsigned long addr, unsigned long end,
>  {
>  	pgd_t *pgd;
>  	unsigned long next;
> +	const struct mm_walk_ops *ops = walk->ops;
>  	int err = 0;
>  
>  	pgd = pgd_offset(walk->mm, addr);
>  	do {
>  		next = pgd_addr_end(addr, end);
>  		if (pgd_none_or_clear_bad(pgd)) {
> -			if (walk->pte_hole)
> -				err = walk->pte_hole(addr, next, walk);
> +			if (ops->pte_hole)
> +				err = ops->pte_hole(addr, next, walk);
>  			if (err)
>  				break;
>  			continue;
>  		}
> -		if (walk->pmd_entry || walk->pte_entry)
> +		if (ops->pmd_entry || ops->pte_entry)
>  			err = walk_p4d_range(pgd, addr, next, walk);
>  		if (err)
>  			break;
> @@ -183,6 +188,7 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
>  	unsigned long hmask = huge_page_mask(h);
>  	unsigned long sz = huge_page_size(h);
>  	pte_t *pte;
> +	const struct mm_walk_ops *ops = walk->ops;
>  	int err = 0;
>  
>  	do {
> @@ -190,9 +196,9 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
>  		pte = huge_pte_offset(walk->mm, addr & hmask, sz);
>  
>  		if (pte)
> -			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
> -		else if (walk->pte_hole)
> -			err = walk->pte_hole(addr, next, walk);
> +			err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
> +		else if (ops->pte_hole)
> +			err = ops->pte_hole(addr, next, walk);
>  
>  		if (err)
>  			break;
> @@ -220,9 +226,10 @@ static int walk_page_test(unsigned long start, unsigned long end,
>  			struct mm_walk *walk)
>  {
>  	struct vm_area_struct *vma = walk->vma;
> +	const struct mm_walk_ops *ops = walk->ops;
>  
> -	if (walk->test_walk)
> -		return walk->test_walk(start, end, walk);
> +	if (ops->test_walk)
> +		return ops->test_walk(start, end, walk);
>  
>  	/*
>  	 * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
> @@ -234,8 +241,8 @@ static int walk_page_test(unsigned long start, unsigned long end,
>  	 */
>  	if (vma->vm_flags & VM_PFNMAP) {
>  		int err = 1;
> -		if (walk->pte_hole)
> -			err = walk->pte_hole(start, end, walk);
> +		if (ops->pte_hole)
> +			err = ops->pte_hole(start, end, walk);
>  		return err ? err : 1;
>  	}
>  	return 0;
> @@ -248,7 +255,8 @@ static int __walk_page_range(unsigned long start, unsigned long end,
>  	struct vm_area_struct *vma = walk->vma;
>  
>  	if (vma && is_vm_hugetlb_page(vma)) {
> -		if (walk->hugetlb_entry)
> +		const struct mm_walk_ops *ops = walk->ops;

NIT: checkpatch would like a blank line here
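
i.e. with the declaration set off from the test:

		const struct mm_walk_ops *ops = walk->ops;

		if (ops->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);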

> +		if (ops->hugetlb_entry)
>  			err = walk_hugetlb_range(start, end, walk);
>  	} else
>  		err = walk_pgd_range(start, end, walk);
> @@ -258,11 +266,13 @@ static int __walk_page_range(unsigned long start, unsigned long end,
>  
>  /**
>   * walk_page_range - walk page table with caller specific callbacks
> - * @start: start address of the virtual address range
> - * @end: end address of the virtual address range
> - * @walk: mm_walk structure defining the callbacks and the target address space
> + * @mm:		mm_struct representing the target process of page table walk
> + * @start:	start address of the virtual address range
> + * @end:	end address of the virtual address range
> + * @ops:	operation to call during the walk
> + * @private:	private data for callbacks' usage
>   *
> - * Recursively walk the page table tree of the process represented by @walk->mm
> + * Recursively walk the page table tree of the process represented by @mm
>   * within the virtual address range [@start, @end). During walking, we can do
>   * some caller-specific works for each entry, by setting up pmd_entry(),
>   * pte_entry(), and/or hugetlb_entry(). If you don't set up for some of these

Missing context:
>  *
>  * Before starting to walk page table, some callers want to check whether
>  * they really want to walk over the current vma, typically by checking
>  * its vm_flags. walk_page_test() and @walk->test_walk() are used for this
>  * purpose.

@walk->test_walk() should now be @ops->test_walk()

> @@ -283,42 +293,48 @@ static int __walk_page_range(unsigned long start, unsigned long end,
>   *
>   * struct mm_walk keeps current values of some common data like vma and pmd,
>   * which are useful for the access from callbacks. If you want to pass some
> - * caller-specific data to callbacks, @walk->private should be helpful.
> + * caller-specific data to callbacks, @private should be helpful.
>   *
>   * Locking:
>   *   Callers of walk_page_range() and walk_page_vma() should hold
>   *   @walk->mm->mmap_sem, because these function traverse vma list and/or

s/walk->// (i.e. "@mm->mmap_sem", to match the new @mm parameter)

Otherwise looks good - I've rebased my series on it and the initial
testing is fine. So for the series:

Reviewed-by: Steven Price <steven.price@....com>

Thanks,

Steve
