Date:   Thu, 7 Jul 2022 09:33:55 -0600
From:   Khalid Aziz <khalid.aziz@...cle.com>
To:     xhao@...ux.alibaba.com, akpm@...ux-foundation.org,
        willy@...radead.org
Cc:     aneesh.kumar@...ux.ibm.com, arnd@...db.de, 21cnbao@...il.com,
        corbet@....net, dave.hansen@...ux.intel.com, david@...hat.com,
        ebiederm@...ssion.com, hagen@...u.net, jack@...e.cz,
        keescook@...omium.org, kirill@...temov.name, kucharsk@...il.com,
        linkinjeon@...nel.org, linux-fsdevel@...r.kernel.org,
        linux-kernel@...r.kernel.org, linux-mm@...ck.org,
        longpeng2@...wei.com, luto@...nel.org, markhemm@...glemail.com,
        pcc@...gle.com, rppt@...nel.org, sieberf@...zon.com,
        sjpark@...zon.de, surenb@...gle.com, tst@...oebel-theuer.de,
        yzaikin@...gle.com
Subject: Re: [PATCH v2 8/9] mm/mshare: Add basic page table sharing support

On 7/7/22 03:13, Xin Hao wrote:
> 
> On 6/30/22 6:53 AM, Khalid Aziz wrote:
>> Add support for creating a new set of shared page tables in a new
>> mm_struct upon mmap of an mshare region. Add page fault handling in
>> this now-mshare'd region. Modify the exit_mmap path to make sure page
>> tables in the mshare'd regions are kept intact when a process using
>> an mshare'd region exits. Clean up the mshare mm_struct when the mshare
>> region is deleted. This support is for the process that creates the
>> mshare region only. Subsequent patches will add support for other
>> processes to be able to map the mshare region.
>>
>> Signed-off-by: Khalid Aziz <khalid.aziz@...cle.com>
>> Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
>> ---
>>   include/linux/mm.h |   2 +
>>   mm/internal.h      |   2 +
>>   mm/memory.c        | 101 +++++++++++++++++++++++++++++-
>>   mm/mshare.c        | 149 ++++++++++++++++++++++++++++++++++++---------
>>   4 files changed, 222 insertions(+), 32 deletions(-)
>>
>> diff --git a/include/linux/mm.h b/include/linux/mm.h
>> index 0ddc3057f73b..63887f06b37b 100644
>> --- a/include/linux/mm.h
>> +++ b/include/linux/mm.h
>> @@ -1859,6 +1859,8 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
>>           unsigned long end, unsigned long floor, unsigned long ceiling);
>>   int
>>   copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
>> +int
>> +mshare_copy_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
>>   int follow_pte(struct mm_struct *mm, unsigned long address,
>>              pte_t **ptepp, spinlock_t **ptlp);
>>   int follow_pfn(struct vm_area_struct *vma, unsigned long address,
>> diff --git a/mm/internal.h b/mm/internal.h
>> index 3f2790aea918..6ae7063ac10d 100644
>> --- a/mm/internal.h
>> +++ b/mm/internal.h
>> @@ -861,6 +861,8 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
>>   DECLARE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
>> +extern vm_fault_t find_shared_vma(struct vm_area_struct **vma,
>> +                    unsigned long *addrp);
>>   static inline bool vma_is_shared(const struct vm_area_struct *vma)
>>   {
>>       return vma->vm_flags & VM_SHARED_PT;
>> diff --git a/mm/memory.c b/mm/memory.c
>> index 7a089145cad4..2a8d5b8928f5 100644
>> --- a/mm/memory.c
>> +++ b/mm/memory.c
>> @@ -416,15 +416,20 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
>>           unlink_anon_vmas(vma);
>>           unlink_file_vma(vma);
>> +        /*
>> +         * There is no page table to be freed for vmas that
>> +         * are mapped in mshare regions
>> +         */
>>           if (is_vm_hugetlb_page(vma)) {
>>               hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
>>                   floor, next ? next->vm_start : ceiling);
>> -        } else {
>> +        } else if (!vma_is_shared(vma)) {
>>               /*
>>                * Optimization: gather nearby vmas into one call down
>>                */
>>               while (next && next->vm_start <= vma->vm_end + PMD_SIZE
>> -                   && !is_vm_hugetlb_page(next)) {
>> +                   && !is_vm_hugetlb_page(next)
>> +                   && !vma_is_shared(next)) {
>>                   vma = next;
>>                   next = vma->vm_next;
>>                   unlink_anon_vmas(vma);
>> @@ -1260,6 +1265,54 @@ vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
>>       return false;
>>   }
>> +/*
>> + * Copy PTEs for mshare'd pages.
>> + * This code is based upon copy_page_range()
>> + */
>> +int
>> +mshare_copy_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
>> +{
>> +    pgd_t *src_pgd, *dst_pgd;
>> +    unsigned long next;
>> +    unsigned long addr = src_vma->vm_start;
>> +    unsigned long end = src_vma->vm_end;
>> +    struct mm_struct *dst_mm = dst_vma->vm_mm;
>> +    struct mm_struct *src_mm = src_vma->vm_mm;
>> +    struct mmu_notifier_range range;
>> +    int ret = 0;
>> +
>> +    mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
>> +                0, src_vma, src_mm, addr, end);
>> +    mmu_notifier_invalidate_range_start(&range);
>> +    /*
>> +     * Disabling preemption is not needed for the write side, as
>> +     * the read side doesn't spin, but goes to the mmap_lock.
>> +     *
>> +     * Use the raw variant of the seqcount_t write API to avoid
>> +     * lockdep complaining about preemptibility.
>> +     */
>> +    mmap_assert_write_locked(src_mm);
>> +    raw_write_seqcount_begin(&src_mm->write_protect_seq);
>> +
>> +    dst_pgd = pgd_offset(dst_mm, addr);
>> +    src_pgd = pgd_offset(src_mm, addr);
>> +    do {
>> +        next = pgd_addr_end(addr, end);
>> +        if (pgd_none_or_clear_bad(src_pgd))
>> +            continue;
>> +        if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
>> +                        addr, next))) {
>> +            ret = -ENOMEM;
>> +            break;
>> +        }
>> +    } while (dst_pgd++, src_pgd++, addr = next, addr != end);
>> +
>> +    raw_write_seqcount_end(&src_mm->write_protect_seq);
>> +    mmu_notifier_invalidate_range_end(&range);
>> +
>> +    return ret;
>> +}
>> +
>>   int
>>   copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
>>   {
>> @@ -1628,6 +1681,13 @@ void unmap_page_range(struct mmu_gather *tlb,
>>       pgd_t *pgd;
>>       unsigned long next;
>> +    /*
>> +     * No need to unmap vmas that share page table through
>> +     * mshare region
>> +     */
>> +    if (vma_is_shared(vma))
>> +        return;
>> +
>>       BUG_ON(addr >= end);
>>       tlb_start_vma(tlb, vma);
>>       pgd = pgd_offset(vma->vm_mm, addr);
>> @@ -5113,6 +5173,8 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
>>                  unsigned int flags, struct pt_regs *regs)
>>   {
>>       vm_fault_t ret;
>> +    bool shared = false;
>> +    struct mm_struct *orig_mm;
>>       __set_current_state(TASK_RUNNING);
>> @@ -5122,6 +5184,16 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
>>       /* do counter updates before entering really critical section. */
>>       check_sync_rss_stat(current);
>> +    orig_mm = vma->vm_mm;
>> +    if (unlikely(vma_is_shared(vma))) {
>> +        ret = find_shared_vma(&vma, &address);
>> +        if (ret)
>> +            return ret;
>> +        if (!vma)
>> +            return VM_FAULT_SIGSEGV;
>> +        shared = true;
> If shared is true, does that mean the original vma is replaced? But the code does not free the original vma?

The original vma is not replaced. Only the local vma pointer used for processing the mm fault is updated. The original
vma continues to be part of the process's vma chain and should not be freed.
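
To illustrate the point, here is a minimal sketch of that pointer redirection -- not the code from this patch. vma_to_mshare_mm() is a hypothetical stand-in for however the patch locates the host mm behind an mshare region, and host-mm locking and any address translation between the two mms are elided:

/*
 * Minimal sketch of the idea -- not the code from this patch.
 * vma_to_mshare_mm() is a hypothetical helper; host-mm locking and
 * address translation between the two mms are elided.
 */
static vm_fault_t find_shared_vma_sketch(struct vm_area_struct **vmap,
					 unsigned long *addrp)
{
	struct mm_struct *host_mm = vma_to_mshare_mm(*vmap);	/* hypothetical */
	struct vm_area_struct *host_vma;

	if (!host_mm)
		return VM_FAULT_SIGSEGV;

	/* Look up the vma covering the fault address in the host mm */
	host_vma = find_vma(host_mm, *addrp);
	if (!host_vma || *addrp < host_vma->vm_start)
		return VM_FAULT_SIGSEGV;

	/*
	 * Redirect only the caller's local pointer.  The vma that was
	 * passed in is neither unlinked from its process's vma chain
	 * nor freed here.
	 */
	*vmap = host_vma;
	return 0;
}

Since handle_mm_fault() receives "vma" by value, redirecting the pointer only changes which vma the rest of the fault path operates on; the vma that was passed in stays linked in the faulting process's mm and is torn down through the normal unmap/exit path, not here.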

Thanks,
Khalid
