Message-ID: <20170711190554.zxkpjeg2bt65wtir@black.fi.intel.com>
Date: Tue, 11 Jul 2017 22:05:54 +0300
From: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
To: Andrey Ryabinin <aryabinin@...tuozzo.com>
Cc: "Kirill A. Shutemov" <kirill@...temov.name>,
Andy Lutomirski <luto@...nel.org>,
Dmitry Vyukov <dvyukov@...gle.com>,
Alexander Potapenko <glider@...gle.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Andrew Morton <akpm@...ux-foundation.org>,
"x86@...nel.org" <x86@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>, Andi Kleen <ak@...ux.intel.com>,
Dave Hansen <dave.hansen@...el.com>,
linux-arch <linux-arch@...r.kernel.org>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
LKML <linux-kernel@...r.kernel.org>,
kasan-dev <kasan-dev@...glegroups.com>
Subject: Re: KASAN vs. boot-time switching between 4- and 5-level paging

> > Can I use your Signed-off-by for a [cleaned up version of your] patch?
>
> Sure.

Another KASAN-related issue: dumping the page tables for the KASAN shadow
memory region takes an unreasonable amount of time, because almost the whole
region is mapped through the shared kasan_zero_p?? tables and the dumper ends
up walking every one of those identical entries.
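
For a rough sense of scale (an illustrative estimate, not a measurement): with
4-level paging the shadow region spans 16 TB, and with 5-level paging it is
larger still, so walking it one 4 KB PTE slot at a time means billions of
note_page() calls. A back-of-the-envelope userspace sketch of that arithmetic:

	/* Illustrative only, not kernel code: how many 4 KB PTE slots a full
	 * walk of the 16 TB (4-level) KASAN shadow region visits. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long shadow_bytes = 16ULL << 40;	/* 16 TB shadow */
		unsigned long long pte_covers   = 4096;		/* 4 KB per PTE */

		printf("PTE slots: %llu\n", shadow_bytes / pte_covers);	/* 4294967296 */
		return 0;
	}
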
The patch below helps. Any objections?
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index b371ab68f2d4..8601153c34e7 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -17,8 +17,8 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
+#include <linux/kasan.h>
 
-#include <asm/kasan.h>
 #include <asm/pgtable.h>
 
 /*
@@ -291,10 +291,15 @@ static void note_page(struct seq_file *m, struct pg_state *st,
 static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr, unsigned long P)
 {
 	int i;
+	unsigned long pte_addr;
 	pte_t *start;
 	pgprotval_t prot;
 
-	start = (pte_t *)pmd_page_vaddr(addr);
+	pte_addr = pmd_page_vaddr(addr);
+	if (__pa(pte_addr) == __pa(kasan_zero_pte))
+		return;
+
+	start = (pte_t *)pte_addr;
 	for (i = 0; i < PTRS_PER_PTE; i++) {
 		prot = pte_flags(*start);
 		st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
@@ -308,10 +313,15 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
 static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr, unsigned long P)
 {
 	int i;
+	unsigned long pmd_addr;
 	pmd_t *start;
 	pgprotval_t prot;
 
-	start = (pmd_t *)pud_page_vaddr(addr);
+	pmd_addr = pud_page_vaddr(addr);
+	if (__pa(pmd_addr) == __pa(kasan_zero_pmd))
+		return;
+
+	start = (pmd_t *)pmd_addr;
 	for (i = 0; i < PTRS_PER_PMD; i++) {
 		st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
 		if (!pmd_none(*start)) {
@@ -350,12 +360,16 @@ static bool pud_already_checked(pud_t *prev_pud, pud_t *pud, bool checkwx)
 static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr, unsigned long P)
 {
 	int i;
+	unsigned long pud_addr;
 	pud_t *start;
 	pgprotval_t prot;
 	pud_t *prev_pud = NULL;
 
-	start = (pud_t *)p4d_page_vaddr(addr);
+	pud_addr = p4d_page_vaddr(addr);
+	if (__pa(pud_addr) == __pa(kasan_zero_pud))
+		return;
+	start = (pud_t *)pud_addr;
 
 	for (i = 0; i < PTRS_PER_PUD; i++) {
 		st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
 		if (!pud_none(*start) &&
@@ -386,11 +400,15 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
 static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr, unsigned long P)
 {
 	int i;
+	unsigned long p4d_addr;
 	p4d_t *start;
 	pgprotval_t prot;
 
-	start = (p4d_t *)pgd_page_vaddr(addr);
+	p4d_addr = pgd_page_vaddr(addr);
+	if (__pa(p4d_addr) == __pa(kasan_zero_p4d))
+		return;
+	start = (p4d_t *)p4d_addr;
 
 	for (i = 0; i < PTRS_PER_P4D; i++) {
 		st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
 		if (!p4d_none(*start)) {
--
Kirill A. Shutemov