Message-Id: <20221223155616.297723-3-david@redhat.com>
Date: Fri, 23 Dec 2022 16:56:16 +0100
From: David Hildenbrand <david@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: linux-mm@...ck.org, David Hildenbrand <david@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Peter Xu <peterx@...hat.com>, Hugh Dickins <hughd@...gle.com>,
Andrea Arcangeli <aarcange@...hat.com>,
Nadav Amit <nadav.amit@...il.com>
Subject: [PATCH v1 2/2] mm/mprotect: drop pgprot_t parameter from change_protection()
Being able to provide a custom pgprot_t opens the door for inconsistencies
and BUGs: for example, accidentally allowing more permissions than
desired by other mechanisms (e.g., softdirty tracking). vma->vm_page_prot
should be the single source of truth.

Only PROT_NUMA is special: there is no way we can erroneously allow
more permissions when removing all permissions. Special-case it using
the MM_CP_PROT_NUMA flag.
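
For example, after this change the two calling conventions look as
follows (sketch only, not part of the patch; the MM_CP_* flags are the
existing ones from include/linux/mm.h):

	/* mprotect()/uffd-wp/softdirty: protection comes from vma->vm_page_prot */
	change_protection(&tlb, vma, start, end, mm_cp_flags);

	/* NUMA hinting: MM_CP_PROT_NUMA makes change_protection() use PAGE_NONE */
	change_protection(&tlb, vma, start, end, MM_CP_PROT_NUMA);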
Signed-off-by: David Hildenbrand <david@...hat.com>
---
 include/linux/mm.h |  3 +--
 mm/mempolicy.c     |  3 +--
 mm/mprotect.c      | 14 +++++++++++---
 mm/userfaultfd.c   |  3 +--
 4 files changed, 14 insertions(+), 9 deletions(-)
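
As a reading aid (kept below the "---" so it stays out of the commit
message): vma->vm_page_prot is derived from vma->vm_flags by
vma_set_page_prot(), which is why adjusting the VMA flags is sufficient
for ordinary protection updates. Lightly simplified from mm/mmap.c; see
the tree for the authoritative version:

	/* Update vma->vm_page_prot to reflect vma->vm_flags. */
	void vma_set_page_prot(struct vm_area_struct *vma)
	{
		unsigned long vm_flags = vma->vm_flags;
		pgprot_t vm_page_prot;

		vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
		if (vma_wants_writenotify(vma, vm_page_prot)) {
			vm_flags &= ~VM_SHARED;
			vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
		}
		/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
		WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
	}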
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f3f196e4d66d..b8be8c33ca20 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2148,8 +2148,7 @@ bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
 			     pte_t pte);
 extern unsigned long change_protection(struct mmu_gather *tlb,
 			      struct vm_area_struct *vma, unsigned long start,
-			      unsigned long end, pgprot_t newprot,
-			      unsigned long cp_flags);
+			      unsigned long end, unsigned long cp_flags);
 extern int mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			  struct vm_area_struct **pprev, unsigned long start,
 			  unsigned long end, unsigned long newflags);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 61aa9aedb728..c3f02703a710 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -635,8 +635,7 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 
 	tlb_gather_mmu(&tlb, vma->vm_mm);
 
-	nr_updated = change_protection(&tlb, vma, addr, end, PAGE_NONE,
-				       MM_CP_PROT_NUMA);
+	nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
 	if (nr_updated)
 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
 
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 908df12caa26..569cefa668a6 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -503,13 +503,21 @@ static unsigned long change_protection_range(struct mmu_gather *tlb,
 
 unsigned long change_protection(struct mmu_gather *tlb,
 			       struct vm_area_struct *vma, unsigned long start,
-			       unsigned long end, pgprot_t newprot,
-			       unsigned long cp_flags)
+			       unsigned long end, unsigned long cp_flags)
 {
+	pgprot_t newprot = vma->vm_page_prot;
 	unsigned long pages;
 
 	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);
 
+	/*
+	 * Ordinary protection updates (mprotect, uffd-wp, softdirty tracking)
+	 * are expected to reflect their requirements via VMA flags such that
+	 * vma_set_page_prot() will adjust vma->vm_page_prot accordingly.
+	 */
+	if (cp_flags & MM_CP_PROT_NUMA)
+		newprot = PAGE_NONE;
+
 	if (is_vm_hugetlb_page(vma))
 		pages = hugetlb_change_protection(vma, start, end, newprot,
 						  cp_flags);
@@ -638,7 +646,7 @@ mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
 
 	vma_set_page_prot(vma);
-	change_protection(tlb, vma, start, end, vma->vm_page_prot, mm_cp_flags);
+	change_protection(tlb, vma, start, end, mm_cp_flags);
 
 	/*
 	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 351e8d6b398b..be7ee9d82e72 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -744,8 +744,7 @@ void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma,
 	if (vma_wants_manual_pte_write_upgrade(dst_vma))
 		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
 	tlb_gather_mmu(&tlb, dst_mm);
-	change_protection(&tlb, dst_vma, start, start + len, dst_vma->vm_page_prot,
-			  mm_cp_flags);
+	change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
 	tlb_finish_mmu(&tlb);
 }
 
--
2.38.1