Message-ID: <20200108152100.7630-2-sergey.dyasli@citrix.com>
Date: Wed, 8 Jan 2020 15:20:57 +0000
From: Sergey Dyasli <sergey.dyasli@...rix.com>
To: <xen-devel@...ts.xen.org>, <kasan-dev@...glegroups.com>,
<linux-mm@...ck.org>, <linux-kernel@...r.kernel.org>
CC: Andrey Ryabinin <aryabinin@...tuozzo.com>,
Alexander Potapenko <glider@...gle.com>,
Dmitry Vyukov <dvyukov@...gle.com>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
Juergen Gross <jgross@...e.com>,
"Stefano Stabellini" <sstabellini@...nel.org>,
George Dunlap <george.dunlap@...rix.com>,
Ross Lagerwall <ross.lagerwall@...rix.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Sergey Dyasli <sergey.dyasli@...rix.com>
Subject: [PATCH v1 1/4] kasan: introduce set_pmd_early_shadow()

It is incorrect to call pmd_populate_kernel() multiple times for the
same page table. Xen notices it during kasan_populate_early_shadow():

    (XEN) mm.c:3222:d155v0 mfn 3704b already pinned

This happens for kasan_early_shadow_pte when USE_SPLIT_PTE_PTLOCKS is
enabled. Fix this by introducing set_pmd_early_shadow(), which calls
pmd_populate_kernel() only once and uses plain set_pmd() afterwards.
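
For reference, on x86 pmd_populate_kernel() is essentially a paravirt
page-table registration followed by the PMD entry write. The sketch
below is a simplified rendering of the helper from
arch/x86/include/asm/pgalloc.h (details may vary by kernel version and
configuration); the paravirt hook is what makes Xen PV pin the PTE
page, so repeating it for the same page trips the check quoted above:

	static inline void pmd_populate_kernel(struct mm_struct *mm,
					       pmd_t *pmd, pte_t *pte)
	{
		/* Under Xen PV this hook pins the new PTE page; calling
		 * it again for an already-pinned page is an error. */
		paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
		/* The entry write itself is idempotent. */
		set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
	}

A bare set_pmd() performs only the entry write, which is why it is
safe to repeat for every subsequent PMD that should point at the
shared early shadow PTE table.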
Signed-off-by: Sergey Dyasli <sergey.dyasli@...rix.com>
---
RFC --> v1:
- New patch
---
mm/kasan/init.c | 25 +++++++++++++++++--------
1 file changed, 17 insertions(+), 8 deletions(-)

diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index ce45c491ebcd..a4077320777f 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -81,6 +81,19 @@ static inline bool kasan_early_shadow_page_entry(pte_t pte)
return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page));
}
+static inline void set_pmd_early_shadow(pmd_t *pmd)
+{
+ static bool pmd_populated = false;
+ pte_t *early_shadow = lm_alias(kasan_early_shadow_pte);
+
+ if (likely(pmd_populated)) {
+ set_pmd(pmd, __pmd(__pa(early_shadow) | _PAGE_TABLE));
+ } else {
+ pmd_populate_kernel(&init_mm, pmd, early_shadow);
+ pmd_populated = true;
+ }
+}
+
static __init void *early_alloc(size_t size, int node)
{
void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
@@ -120,8 +133,7 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
next = pmd_addr_end(addr, end);
if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
- pmd_populate_kernel(&init_mm, pmd,
- lm_alias(kasan_early_shadow_pte));
+ set_pmd_early_shadow(pmd);
continue;
}
@@ -157,8 +169,7 @@ static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
pud_populate(&init_mm, pud,
lm_alias(kasan_early_shadow_pmd));
pmd = pmd_offset(pud, addr);
- pmd_populate_kernel(&init_mm, pmd,
- lm_alias(kasan_early_shadow_pte));
+ set_pmd_early_shadow(pmd);
continue;
}
@@ -198,8 +209,7 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
pud_populate(&init_mm, pud,
lm_alias(kasan_early_shadow_pmd));
pmd = pmd_offset(pud, addr);
- pmd_populate_kernel(&init_mm, pmd,
- lm_alias(kasan_early_shadow_pte));
+ set_pmd_early_shadow(pmd);
continue;
}
@@ -271,8 +281,7 @@ int __ref kasan_populate_early_shadow(const void *shadow_start,
pud_populate(&init_mm, pud,
lm_alias(kasan_early_shadow_pmd));
pmd = pmd_offset(pud, addr);
- pmd_populate_kernel(&init_mm, pmd,
- lm_alias(kasan_early_shadow_pte));
+ set_pmd_early_shadow(pmd);
continue;
}
--
2.17.1