Message-ID: <YwySW3ROc21hN7g9@hirez.programming.kicks-ass.net>
Date: Mon, 29 Aug 2022 12:18:03 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Dave Hansen <dave.hansen@...el.com>, x86@...nel.org
Cc: linux-kernel@...r.kernel.org, linux-hardening@...r.kernel.org,
    keescook@...omium.org, Sean Christopherson <seanjc@...gle.com>
Subject: [PATCH v2] x86/mm: Refuse W^X violations

x86 has STRICT_*_RWX, but there is not even a warning when someone
violates it.

Add this warning and fully refuse the transition.

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 arch/x86/mm/pat/set_memory.c | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -580,6 +580,33 @@ static inline pgprot_t static_protection
 }
 
 /*
+ * Validate and enforce strict W^X semantics.
+ */
+static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long start,
+                                  unsigned long pfn, unsigned long npg)
+{
+        unsigned long end;
+
+        if (!cpu_feature_enabled(X86_FEATURE_NX))
+                return new;
+
+        if (!((pgprot_val(old) ^ pgprot_val(new)) & (_PAGE_RW | _PAGE_NX)))
+                return new;
+
+        if ((pgprot_val(new) & (_PAGE_RW | _PAGE_NX)) != _PAGE_RW)
+                return new;
+
+        end = start + npg * PAGE_SIZE - 1;
+        WARN_ONCE(1, "CPA refuse W^X violation: %016llx -> %016llx range: 0x%016lx - 0x%016lx PFN %lx\n",
+                  (unsigned long long)pgprot_val(old),
+                  (unsigned long long)pgprot_val(new),
+                  start, end, pfn);
+
+        /* refuse the transition into WX */
+        return old;
+}
+
+/*
  * Lookup the page table entry for a virtual address in a specific pgd.
  * Return a pointer to the entry and the level of the mapping.
  */
@@ -885,6 +912,8 @@ static int __should_split_large_page(pte
         new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
                                       psize, CPA_DETECT);
 
+        new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages);
+
         /*
          * If there is a conflict, split the large page.
          *
@@ -1525,6 +1554,7 @@ static int __change_page_attr(struct cpa
 
         if (level == PG_LEVEL_4K) {
                 pte_t new_pte;
+                pgprot_t old_prot = pte_pgprot(old_pte);
                 pgprot_t new_prot = pte_pgprot(old_pte);
                 unsigned long pfn = pte_pfn(old_pte);
 
@@ -1536,6 +1566,8 @@ static int __change_page_attr(struct cpa
                 new_prot = static_protections(new_prot, address, pfn, 1, 0,
                                               CPA_PROTECT);
 
+                new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1);
+
                 new_prot = pgprot_clear_protnone_bits(new_prot);
 
                 /*
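
For illustration only, and not part of the patch: a minimal, hypothetical out-of-tree module sketching the kind of transition verify_rwx() now refuses, assuming set_memory_x() is reachable from the calling code (it may not be exported to modules on every kernel). vmalloc() memory is mapped RW and non-executable by default, so asking CPA to clear _PAGE_NX while _PAGE_RW stays set is exactly the W+X request that now triggers the warning and keeps the old protection.

/* wx_demo.c - hypothetical sketch, not part of this patch */
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

static void *buf;

static int __init wx_demo_init(void)
{
        buf = vmalloc(PAGE_SIZE);       /* mapped RW + NX by default */
        if (!buf)
                return -ENOMEM;

        /*
         * Requesting X on a page that is still RW asks CPA for a W+X
         * mapping.  With this patch the request hits verify_rwx(),
         * which emits the "CPA refuse W^X violation" warning once and
         * returns the old pgprot, so the page stays non-executable.
         */
        set_memory_x((unsigned long)buf, 1);

        return 0;
}

static void __exit wx_demo_exit(void)
{
        vfree(buf);
}

module_init(wx_demo_init);
module_exit(wx_demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("W^X refusal demo (hypothetical)");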