diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h
index ff5c7134a37a..5cdb3a1f3995 100644
--- a/arch/x86/include/asm/kfence.h
+++ b/arch/x86/include/asm/kfence.h
@@ -37,32 +37,19 @@ static inline bool arch_kfence_init_pool(void)
 	return true;
 }
 
-/* Protect the given page and flush TLB. */
+/*
+ * Protect the given page and flush TLB.
+ *
+ * NOTE(review): unlike the open-coded set_pte() this replaces,
+ * set_memory_np()/set_memory_p() flush TLBs on all CPUs (IPIs).
+ * KFENCE may allocate or fault with interrupts disabled, which is
+ * why the old code avoided IPIs — confirm this is safe before merging.
+ */
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
-	unsigned int level;
-	pte_t *pte = lookup_address(addr, &level);
-
-	if (WARN_ON(!pte || level != PG_LEVEL_4K))
-		return false;
-
-	/*
-	 * We need to avoid IPIs, as we may get KFENCE allocations or faults
-	 * with interrupts disabled. Therefore, the below is best-effort, and
-	 * does not flush TLBs on all CPUs. We can tolerate some inaccuracy;
-	 * lazy fault handling takes care of faults after the page is PRESENT.
-	 */
 	if (protect)
-		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+		set_memory_np(addr, 1);
 	else
-		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+		set_memory_p(addr, 1);
 
-	/*
-	 * Flush this CPU's TLB, assuming whoever did the allocation/free is
-	 * likely to continue running on this CPU.
-	 */
-	preempt_disable();
-	flush_tlb_one_kernel(addr);
-	preempt_enable();
 	return true;
 }