Message-Id: <20210319152428.52683-1-minhquangbui99@gmail.com>
Date: Fri, 19 Mar 2021 22:24:28 +0700
From: Bui Quang Minh <minhquangbui99@...il.com>
To: akpm@...ux-foundation.org
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
minhquangbui99@...il.com
Subject: [PATCH] userfaultfd: Write protect when virtual memory range has no page table entry

userfaultfd_writeprotect() uses change_protection() to clear the write bit
in page table entries (pte/pmd), so a later write to that virtual address
range causes a page fault, which is then handled by the userspace program.

However, change_protection() has no effect when there are no page table
entries associated with that virtual memory range, e.g. a newly mapped
range that has not been touched yet. As a result, a later access to that
memory range allocates a page table entry with the write bit still set
(due to the VM_WRITE flag in vma->vm_flags).
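
To illustrate, here is a minimal userspace sketch of the problematic
sequence (not part of this patch; it assumes a kernel with uffd-wp
support, i.e. UFFDIO_WRITEPROTECT, and elides all error handling). The
write-protect ioctl below finds no pte/pmd to modify, so the final write
faults in a writable page and no message ever reaches a userfaultfd
reader:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = {
		.api = UFFD_API,
		.features = UFFD_FEATURE_PAGEFAULT_FLAG_WP,
	};
	ioctl(uffd, UFFDIO_API, &api);

	/* Newly mapped range: no page table entries exist yet. */
	char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	struct uffdio_register reg = {
		.range = { .start = (unsigned long)buf, .len = page },
		.mode = UFFDIO_REGISTER_MODE_WP,
	};
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	struct uffdio_writeprotect wp = {
		.range = { .start = (unsigned long)buf, .len = page },
		.mode = UFFDIO_WRITEPROTECT_MODE_WP,
	};
	/* change_protection() finds no pte/pmd to clear here... */
	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);

	/* ...so this allocates a writable pte and raises no uffd event. */
	buf[0] = 1;
	return 0;
}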

Add checks for VM_UFFD_WP in vma->vm_flags when allocating a new page
table entry in the missing-page-table-entry page fault paths: the
anonymous pte path (do_anonymous_page()) and the anonymous huge pmd path
(__do_huge_pmd_anonymous_page()).
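
For reference, the helpers used by these checks already exist; the
following is quoted from include/linux/userfaultfd_k.h (as of v5.11,
whitespace reflowed). userfaultfd_wp() tests only the vma registration,
while the pte/huge-pmd variants additionally require the uffd-wp bit in
the entry itself:

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_WP;
}

static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
				      pte_t pte)
{
	return userfaultfd_wp(vma) && pte_uffd_wp(pte);
}

static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
					   pmd_t pmd)
{
	return userfaultfd_wp(vma) && pmd_uffd_wp(pmd);
}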
Signed-off-by: Bui Quang Minh <minhquangbui99@...il.com>
---
 mm/huge_memory.c | 12 ++++++++++++
 mm/memory.c      | 10 ++++++++++
 2 files changed, 22 insertions(+)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ae907a9c2050..9bb16a55a48c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -636,6 +636,11 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 
 		entry = mk_huge_pmd(page, vma->vm_page_prot);
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+		if (userfaultfd_wp(vma)) {
+			entry = pmd_wrprotect(entry);
+			entry = pmd_mkuffd_wp(entry);
+		}
+
 		page_add_new_anon_rmap(page, vma, haddr, true);
 		lru_cache_add_inactive_or_unevictable(page, vma);
 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
@@ -643,6 +648,13 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
 		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
 		mm_inc_nr_ptes(vma->vm_mm);
+
+		if (userfaultfd_huge_pmd_wp(vma, *vmf->pmd)) {
+			spin_unlock(vmf->ptl);
+			count_vm_event(THP_FAULT_ALLOC);
+			count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
+			return handle_userfault(vmf, VM_UFFD_WP);
+		}
 		spin_unlock(vmf->ptl);
 		count_vm_event(THP_FAULT_ALLOC);
 		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
diff --git a/mm/memory.c b/mm/memory.c
index 5efa07fb6cdc..b835746545bf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3564,6 +3564,11 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
 
+	if (userfaultfd_wp(vma)) {
+		entry = pte_wrprotect(entry);
+		entry = pte_mkuffd_wp(entry);
+	}
+
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
 			&vmf->ptl);
 	if (!pte_none(*vmf->pte)) {
@@ -3590,6 +3595,11 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, vmf->address, vmf->pte);
+
+	if (userfaultfd_pte_wp(vma, *vmf->pte)) {
+		pte_unmap_unlock(vmf->pte, vmf->ptl);
+		return handle_userfault(vmf, VM_UFFD_WP);
+	}
 unlock:
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	return ret;
--
2.25.1