Message-ID: <20190422203647.GK14666@redhat.com>
Date:   Mon, 22 Apr 2019 16:36:47 -0400
From:   Jerome Glisse <jglisse@...hat.com>
To:     Laurent Dufour <ldufour@...ux.ibm.com>
Cc:     akpm@...ux-foundation.org, mhocko@...nel.org, peterz@...radead.org,
        kirill@...temov.name, ak@...ux.intel.com, dave@...olabs.net,
        jack@...e.cz, Matthew Wilcox <willy@...radead.org>,
        aneesh.kumar@...ux.ibm.com, benh@...nel.crashing.org,
        mpe@...erman.id.au, paulus@...ba.org,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>, hpa@...or.com,
        Will Deacon <will.deacon@....com>,
        Sergey Senozhatsky <sergey.senozhatsky@...il.com>,
        sergey.senozhatsky.work@...il.com,
        Andrea Arcangeli <aarcange@...hat.com>,
        Alexei Starovoitov <alexei.starovoitov@...il.com>,
        kemi.wang@...el.com, Daniel Jordan <daniel.m.jordan@...cle.com>,
        David Rientjes <rientjes@...gle.com>,
        Ganesh Mahendran <opensource.ganesh@...il.com>,
        Minchan Kim <minchan@...nel.org>,
        Punit Agrawal <punitagrawal@...il.com>,
        vinayak menon <vinayakm.list@...il.com>,
        Yang Shi <yang.shi@...ux.alibaba.com>,
        zhong jiang <zhongjiang@...wei.com>,
        Haiyan Song <haiyanx.song@...el.com>,
        Balbir Singh <bsingharora@...il.com>, sj38.park@...il.com,
        Michel Lespinasse <walken@...gle.com>,
        Mike Rapoport <rppt@...ux.ibm.com>,
        linux-kernel@...r.kernel.org, linux-mm@...ck.org,
        haren@...ux.vnet.ibm.com, npiggin@...il.com,
        paulmck@...ux.vnet.ibm.com, Tim Chen <tim.c.chen@...ux.intel.com>,
        linuxppc-dev@...ts.ozlabs.org, x86@...nel.org
Subject: Re: [PATCH v12 20/31] mm: introduce vma reference counter

On Tue, Apr 16, 2019 at 03:45:11PM +0200, Laurent Dufour wrote:
> The final goal is to be able to use a VMA structure without holding the
> mmap_sem and to be sure that the structure will not be freed behind our
> back.
> 
> Lockless use of the VMA will rely on RCU protection, and thus a dedicated
> freeing service is required to release it asynchronously.
> 
> As reported in a thread from 2010 [1], this may impact file handling when a
> file is still referenced while the mapping is no longer there.  As the final
> goal is to handle anonymous VMAs in a speculative way, and not file-backed
> mappings, we can close and free the file pointer synchronously, as soon as
> we are guaranteed not to use it without holding the mmap_sem.  As a sanity
> measure, and with minimal effort, the vm_file pointer is cleared once the
> file reference is put.
> 
> [1] https://lore.kernel.org/linux-mm/20100104182429.833180340@chello.nl/
> 
> Signed-off-by: Laurent Dufour <ldufour@...ux.ibm.com>

Using kref would have been better from my POV, even with RCU freeing,
but anyway:

Reviewed-by: Jérôme Glisse <jglisse@...hat.com>
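
For illustration only, a kref-based variant could look roughly like the
sketch below (assuming a hypothetical 'struct kref vm_ref' member in
struct vm_area_struct in place of the atomic_t vm_ref_count; this is not
what the patch implements):

	/*
	 * Sketch only: kref-based reference counting, with a hypothetical
	 * 'struct kref vm_ref' field replacing vm_ref_count.
	 */
	static void vma_kref_release(struct kref *kref)
	{
		struct vm_area_struct *vma =
			container_of(kref, struct vm_area_struct, vm_ref);

		__free_vma(vma);
	}

	static inline void get_vma(struct vm_area_struct *vma)
	{
		kref_get(&vma->vm_ref);
	}

	static inline void put_vma(struct vm_area_struct *vma)
	{
		/* calls vma_kref_release() when the last reference is dropped */
		kref_put(&vma->vm_ref, vma_kref_release);
	}

	/* and kref_init(&vma->vm_ref) instead of atomic_set() in vma_init()/INIT_VMA() */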

> ---
>  include/linux/mm.h       |  4 ++++
>  include/linux/mm_types.h |  3 +++
>  mm/internal.h            | 27 +++++++++++++++++++++++++++
>  mm/mmap.c                | 13 +++++++++----
>  4 files changed, 43 insertions(+), 4 deletions(-)
> 
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index f14b2c9ddfd4..f761a9c65c74 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -529,6 +529,9 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
>  	vma->vm_mm = mm;
>  	vma->vm_ops = &dummy_vm_ops;
>  	INIT_LIST_HEAD(&vma->anon_vma_chain);
> +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
> +	atomic_set(&vma->vm_ref_count, 1);
> +#endif
>  }
>  
>  static inline void vma_set_anonymous(struct vm_area_struct *vma)
> @@ -1418,6 +1421,7 @@ static inline void INIT_VMA(struct vm_area_struct *vma)
>  	INIT_LIST_HEAD(&vma->anon_vma_chain);
>  #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
>  	seqcount_init(&vma->vm_sequence);
> +	atomic_set(&vma->vm_ref_count, 1);
>  #endif
>  }
>  
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 24b3f8ce9e42..6a6159e11a3f 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -285,6 +285,9 @@ struct vm_area_struct {
>  	/* linked list of VM areas per task, sorted by address */
>  	struct vm_area_struct *vm_next, *vm_prev;
>  
> +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
> +	atomic_t vm_ref_count;
> +#endif
>  	struct rb_node vm_rb;
>  
>  	/*
> diff --git a/mm/internal.h b/mm/internal.h
> index 9eeaf2b95166..302382bed406 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -40,6 +40,33 @@ void page_writeback_init(void);
>  
>  vm_fault_t do_swap_page(struct vm_fault *vmf);
>  
> +
> +extern void __free_vma(struct vm_area_struct *vma);
> +
> +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
> +static inline void get_vma(struct vm_area_struct *vma)
> +{
> +	atomic_inc(&vma->vm_ref_count);
> +}
> +
> +static inline void put_vma(struct vm_area_struct *vma)
> +{
> +	if (atomic_dec_and_test(&vma->vm_ref_count))
> +		__free_vma(vma);
> +}
> +
> +#else
> +
> +static inline void get_vma(struct vm_area_struct *vma)
> +{
> +}
> +
> +static inline void put_vma(struct vm_area_struct *vma)
> +{
> +	__free_vma(vma);
> +}
> +#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
> +
>  void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
>  		unsigned long floor, unsigned long ceiling);
>  
> diff --git a/mm/mmap.c b/mm/mmap.c
> index f7f6027a7dff..c106440dcae7 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -188,6 +188,12 @@ static inline void mm_write_sequnlock(struct mm_struct *mm)
>  }
>  #endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
>  
> +void __free_vma(struct vm_area_struct *vma)
> +{
> +	mpol_put(vma_policy(vma));
> +	vm_area_free(vma);
> +}
> +
>  /*
>   * Close a vm structure and free it, returning the next.
>   */
> @@ -200,8 +206,8 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
>  		vma->vm_ops->close(vma);
>  	if (vma->vm_file)
>  		fput(vma->vm_file);
> -	mpol_put(vma_policy(vma));
> -	vm_area_free(vma);
> +	vma->vm_file = NULL;
> +	put_vma(vma);
>  	return next;
>  }
>  
> @@ -990,8 +996,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
>  		if (next->anon_vma)
>  			anon_vma_merge(vma, next);
>  		mm->map_count--;
> -		mpol_put(vma_policy(next));
> -		vm_area_free(next);
> +		put_vma(next);
>  		/*
>  		 * In mprotect's case 6 (see comments on vma_merge),
>  		 * we must remove another next too. It would clutter
> -- 
> 2.21.0
> 
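
For context, the kind of lockless usage the commit message above is aiming
at would look roughly like this sketch (illustration only; find_vma_rcu()
is a hypothetical lookup helper, and the actual speculative fault path in
the series is wired up differently):

	/*
	 * Illustration only, not from this series: pin a VMA found under
	 * RCU so it can be used without holding the mmap_sem.
	 */
	static void example_lockless_vma_user(struct mm_struct *mm, unsigned long addr)
	{
		struct vm_area_struct *vma;

		rcu_read_lock();
		vma = find_vma_rcu(mm, addr);	/* hypothetical RCU-safe lookup */
		if (vma)
			get_vma(vma);		/* pin the VMA beyond the RCU read section */
		rcu_read_unlock();

		if (!vma)
			return;

		/* ... speculative handling of an anonymous VMA, no mmap_sem held ... */

		put_vma(vma);			/* the last reference ends up in __free_vma() */
	}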
