Message-Id: <E1JEMsg-00079n-FK@pomaz-ex.szeredi.hu>
Date:	Mon, 14 Jan 2008 11:49:26 +0100
From:	Miklos Szeredi <miklos@...redi.hu>
To:	salikhmetov@...il.com
CC:	linux-mm@...ck.org, jakob@...hought.net,
	linux-kernel@...r.kernel.org, valdis.kletnieks@...edu,
	riel@...hat.com, ksm@...dk, staubach@...hat.com,
	jesper.juhl@...il.com, torvalds@...ux-foundation.org,
	a.p.zijlstra@...llo.nl, akpm@...ux-foundation.org,
	protasnb@...il.com
Subject: Re: [PATCH 1/2] massive code cleanup of sys_msync()

> Substantial code cleanup of the sys_msync() function:
> 
> 1) using the PAGE_ALIGN() macro instead of "manual" alignment;
> 2) improved readability of the loop traversing the process memory regions.

Thanks for doing this.  See comments below.

> Signed-off-by: Anton Salikhmetov <salikhmetov@...il.com>
> ---
>  mm/msync.c |   78 +++++++++++++++++++++++++++---------------------------------
>  1 files changed, 35 insertions(+), 43 deletions(-)
> 
> diff --git a/mm/msync.c b/mm/msync.c
> index 144a757..ff654c9 100644
> --- a/mm/msync.c
> +++ b/mm/msync.c
> @@ -1,24 +1,25 @@
>  /*
>   *	linux/mm/msync.c
>   *
> + * The msync() system call.
>   * Copyright (C) 1994-1999  Linus Torvalds
> + *
> + * Substantial code cleanup.
> + * Copyright (C) 2008 Anton Salikhmetov <salikhmetov@...il.com>
>   */
>  
> -/*
> - * The msync() system call.
> - */
> +#include <linux/file.h>
>  #include <linux/fs.h>
>  #include <linux/mm.h>
>  #include <linux/mman.h>
> -#include <linux/file.h>
> -#include <linux/syscalls.h>
>  #include <linux/sched.h>
> +#include <linux/syscalls.h>
>  
>  /*
>   * MS_SYNC syncs the entire file - including mappings.
>   *
>   * MS_ASYNC does not start I/O (it used to, up to 2.5.67).
> - * Nor does it marks the relevant pages dirty (it used to up to 2.6.17).
> + * Nor does it mark the relevant pages dirty (it used to up to 2.6.17).
>   * Now it doesn't do anything, since dirty pages are properly tracked.
>   *
>   * The application may now run fsync() to
> @@ -33,71 +34,62 @@ asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
>  	unsigned long end;
>  	struct mm_struct *mm = current->mm;
>  	struct vm_area_struct *vma;
> -	int unmapped_error = 0;
> -	int error = -EINVAL;
> +	int error = 0, unmapped_error = 0;
>  
>  	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
> -		goto out;
> +		return -EINVAL;
>  	if (start & ~PAGE_MASK)
> -		goto out;
> +		return -EINVAL;
>  	if ((flags & MS_ASYNC) && (flags & MS_SYNC))
> -		goto out;
> -	error = -ENOMEM;
> -	len = (len + ~PAGE_MASK) & PAGE_MASK;
> +		return -EINVAL;
> +
> +	len = PAGE_ALIGN(len);
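
Aside, purely for reference: assuming the usual asm/page.h definitions,
PAGE_ALIGN() expands to the same masking arithmetic the old line used,
so this hunk is a pure readability change:

	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)

	/* so the old and new forms compute the same value: */
	len = (len + ~PAGE_MASK) & PAGE_MASK;	/* ~PAGE_MASK == PAGE_SIZE - 1 */
	len = PAGE_ALIGN(len);
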
>  	end = start + len;
>  	if (end < start)
> -		goto out;
> -	error = 0;
> +		return -ENOMEM;
>  	if (end == start)
> -		goto out;
> +		return 0;
> +
>  	/*
>  	 * If the interval [start,end) covers some unmapped address ranges,
>  	 * just ignore them, but return -ENOMEM at the end.
>  	 */
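
For the record, the behaviour that comment describes is visible from
userspace roughly like this (hypothetical fragment; "fd" is assumed to
be an open file of at least four pages, headers and error checking
omitted):

	size_t page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);
	munmap(p + page, page);			/* poke a hole in the mapping */
	if (msync(p, 4 * page, MS_SYNC) == -1)
		perror("msync");		/* expect ENOMEM here */
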
>  	down_read(&mm->mmap_sem);
>  	vma = find_vma(mm, start);
> -	for (;;) {
> +	do {
>  		struct file *file;
>  
> -		/* Still start < end. */
> -		error = -ENOMEM;
> -		if (!vma)
> -			goto out_unlock;
> -		/* Here start < vma->vm_end. */
> +		if (!vma) {
> +			error = -ENOMEM;
> +			break;
> +		}
>  		if (start < vma->vm_start) {
>  			start = vma->vm_start;
> -			if (start >= end)
> -				goto out_unlock;
> +			if (start >= end) {
> +				error = -ENOMEM;
> +				break;
> +			}
>  			unmapped_error = -ENOMEM;
>  		}
> -		/* Here vma->vm_start <= start < vma->vm_end. */
> -		if ((flags & MS_INVALIDATE) &&
> -				(vma->vm_flags & VM_LOCKED)) {
> +		if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED)) {
>  			error = -EBUSY;
> -			goto out_unlock;
> +			break;
>  		}
>  		file = vma->vm_file;
> -		start = vma->vm_end;
> -		if ((flags & MS_SYNC) && file &&
> -				(vma->vm_flags & VM_SHARED)) {
> +		if ((flags & MS_SYNC) && file && (vma->vm_flags & VM_SHARED)) {
>  			get_file(file);
>  			up_read(&mm->mmap_sem);
>  			error = do_fsync(file, 0);
>  			fput(file);
> -			if (error || start >= end)
> -				goto out;
> +			if (error)
> +				return error;
>  			down_read(&mm->mmap_sem);
> -			vma = find_vma(mm, start);

Where did this line go?  It's needed because after releasing and
reacquiring the mmap sem, the old vma may have gone away.

I suggest that, when doing such a massive cleanup, you split it up
even further into easily understandable pieces, so that such bugs
cannot creep in.
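
Just to illustrate the point (untested sketch, not meant as the final
patch): the MS_SYNC branch can keep that re-lookup and still fit your
do/while structure if "start" is advanced before the lock may be
dropped:

		file = vma->vm_file;
		start = vma->vm_end;
		if ((flags & MS_SYNC) && file && (vma->vm_flags & VM_SHARED)) {
			get_file(file);
			up_read(&mm->mmap_sem);
			error = do_fsync(file, 0);
			fput(file);
			if (error)
				return error;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, start);	/* old vma may be gone */
		} else
			vma = vma->vm_next;
	} while (start < end);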

> -		} else {
> -			if (start >= end) {
> -				error = 0;
> -				goto out_unlock;
> -			}
> -			vma = vma->vm_next;
>  		}
> -	}
> -out_unlock:
> +
> +		start = vma->vm_end;
> +		vma = vma->vm_next;
> +	} while (start < end);
>  	up_read(&mm->mmap_sem);
> -out:
> +
>  	return error ? : unmapped_error;
>  }
> -- 
> 1.4.4.4
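
One more note, purely informational: the final "error ? : unmapped_error"
uses the GCC conditional-with-omitted-middle-operand extension, i.e. it
is the same as the longer form

	return error ? error : unmapped_error;

so unmapped ranges are only reported when nothing else failed.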

Miklos
