Date:   Mon, 24 Oct 2016 15:01:12 +0200
From:   Michal Hocko <mhocko@...nel.org>
To:     Lorenzo Stoakes <lstoakes@...il.com>
Cc:     linux-mm@...ck.org, Linus Torvalds <torvalds@...ux-foundation.org>,
        Jan Kara <jack@...e.cz>, Hugh Dickins <hughd@...gle.com>,
        Dave Hansen <dave.hansen@...ux.intel.com>,
        Rik van Riel <riel@...hat.com>,
        Mel Gorman <mgorman@...hsingularity.net>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Paolo Bonzini <pbonzini@...hat.com>,
        Radim Krčmář <rkrcmar@...hat.com>,
        kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH] mm: unexport __get_user_pages()

On Mon 24-10-16 10:57:25, Lorenzo Stoakes wrote:
> This patch unexports the low-level __get_user_pages() function. Recent
> refactoring of the get_user_pages* functions allows flags to be passed
> through get_user_pages(), which eliminates the need for its one external
> user, kvm, to access this function directly.
> 
> We can see that the 2 calls to get_user_pages() which replace __get_user_pages()
> in kvm_main.c are equivalent by examining their call stacks:
> 
> get_user_page_nowait():
>   get_user_pages(start, 1, flags, page, NULL)
>   __get_user_pages_locked(current, current->mm, start, 1, page, NULL, NULL,
> 			  false, flags | FOLL_TOUCH)
>   __get_user_pages(current, current->mm, start, 1,
> 		   flags | FOLL_TOUCH | FOLL_GET, page, NULL, NULL)
> 
> check_user_page_hwpoison():
>   get_user_pages(addr, 1, flags, NULL, NULL)
>   __get_user_pages_locked(current, current->mm, addr, 1, NULL, NULL, NULL,
> 			  false, flags | FOLL_TOUCH)
>   __get_user_pages(current, current->mm, addr, 1, flags | FOLL_TOUCH, NULL,
> 		   NULL, NULL)

Hmm, OK. Looks good to me. FOLL_GET is an implicit parameter for g-u-p
now, which is a good thing. There are a few follow_page users, but all of
them are pretty much mm-internal things. It would be great to document
that, and ideally also to split and document the rest of the flags into
external and internal ones. Thanks!
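
Something like the following, just as a sketch -- the macro names are made
up and the actual grouping of the FOLL_* flags would need more thought:

	/*
	 * Illustrative grouping only: deciding which FOLL_* flags belong
	 * to the external gup API and which are gup-internal is exactly
	 * the documentation work being suggested.
	 */

	/* flags callers of get_user_pages*() are expected to pass */
	#define FOLL_EXTERNAL_FLAGS \
		(FOLL_WRITE | FOLL_FORCE | FOLL_NOWAIT | FOLL_HWPOISON)

	/* flags applied internally by the gup machinery itself */
	#define FOLL_INTERNAL_FLAGS \
		(FOLL_TOUCH | FOLL_GET | FOLL_POPULATE | FOLL_TRIED)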

> Signed-off-by: Lorenzo Stoakes <lstoakes@...il.com>

Acked-by: Michal Hocko <mhocko@...e.com>

> ---
>  include/linux/mm.h  |  4 ----
>  mm/gup.c            |  3 +--
>  mm/nommu.c          |  2 +-
>  virt/kvm/kvm_main.c | 10 ++++------
>  4 files changed, 6 insertions(+), 13 deletions(-)
> 
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 3a19185..a92c8d7 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -1271,10 +1271,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
>  extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
>  		void *buf, int len, unsigned int gup_flags);
>  
> -long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
> -		      unsigned long start, unsigned long nr_pages,
> -		      unsigned int foll_flags, struct page **pages,
> -		      struct vm_area_struct **vmas, int *nonblocking);
>  long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
>  			    unsigned long start, unsigned long nr_pages,
>  			    unsigned int gup_flags, struct page **pages,
> diff --git a/mm/gup.c b/mm/gup.c
> index 7aa113c..ec4f827 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -526,7 +526,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
>   * instead of __get_user_pages. __get_user_pages should be used only if
>   * you need some special @gup_flags.
>   */
> -long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
> +static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
>  		unsigned long start, unsigned long nr_pages,
>  		unsigned int gup_flags, struct page **pages,
>  		struct vm_area_struct **vmas, int *nonblocking)
> @@ -631,7 +631,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
>  	} while (nr_pages);
>  	return i;
>  }
> -EXPORT_SYMBOL(__get_user_pages);
>  
>  bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags)
>  {
> diff --git a/mm/nommu.c b/mm/nommu.c
> index db5fd17..8b8faaf 100644
> --- a/mm/nommu.c
> +++ b/mm/nommu.c
> @@ -109,7 +109,7 @@ unsigned int kobjsize(const void *objp)
>  	return PAGE_SIZE << compound_order(page);
>  }
>  
> -long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
> +static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
>  		      unsigned long start, unsigned long nr_pages,
>  		      unsigned int foll_flags, struct page **pages,
>  		      struct vm_area_struct **vmas, int *nonblocking)
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 28510e7..2907b7b 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -1346,21 +1346,19 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
>  static int get_user_page_nowait(unsigned long start, int write,
>  		struct page **page)
>  {
> -	int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
> +	int flags = FOLL_NOWAIT | FOLL_HWPOISON;
>  
>  	if (write)
>  		flags |= FOLL_WRITE;
>  
> -	return __get_user_pages(current, current->mm, start, 1, flags, page,
> -			NULL, NULL);
> +	return get_user_pages(start, 1, flags, page, NULL);
>  }
>  
>  static inline int check_user_page_hwpoison(unsigned long addr)
>  {
> -	int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
> +	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
>  
> -	rc = __get_user_pages(current, current->mm, addr, 1,
> -			      flags, NULL, NULL, NULL);
> +	rc = get_user_pages(addr, 1, flags, NULL, NULL);
>  	return rc == -EHWPOISON;
>  }
>  
> -- 
> 2.10.1
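
For reference, the get_user_pages() prototype the new call sites rely on
(after the recent gup_flags refactoring) looks roughly like this:

	long get_user_pages(unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas);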

-- 
Michal Hocko
SUSE Labs
