[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1562855138-19507-7-git-send-email-alexandre.chartre@oracle.com>
Date: Thu, 11 Jul 2019 16:25:18 +0200
From: Alexandre Chartre <alexandre.chartre@...cle.com>
To: pbonzini@...hat.com, rkrcmar@...hat.com, tglx@...utronix.de,
mingo@...hat.com, bp@...en8.de, hpa@...or.com,
dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
kvm@...r.kernel.org, x86@...nel.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Cc: konrad.wilk@...cle.com, jan.setjeeilers@...cle.com,
liran.alon@...cle.com, jwadams@...gle.com, graf@...zon.de,
rppt@...ux.vnet.ibm.com, alexandre.chartre@...cle.com
Subject: [RFC v2 06/26] mm/asi: Add ASI page-table entry allocation functions
Add functions to allocate p4d/pud/pmd/pte pages for an ASI page-table
and keep track of them.
Signed-off-by: Alexandre Chartre <alexandre.chartre@...cle.com>
---
arch/x86/mm/asi_pagetable.c | 111 +++++++++++++++++++++++++++++++++++++++++++
1 files changed, 111 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/asi_pagetable.c b/arch/x86/mm/asi_pagetable.c
index a89e02e..0fc6d59 100644
--- a/arch/x86/mm/asi_pagetable.c
+++ b/arch/x86/mm/asi_pagetable.c
@@ -4,6 +4,8 @@
*
*/
+#include <linux/mm.h>
+
#include <asm/asi.h>
/*
@@ -159,3 +161,112 @@ static bool asi_valid_offset(struct asi *asi, void *offset)
return p4d;
}
+
+/*
+ * asi_pXX_alloc() functions are equivalent to kernel pXX_alloc() functions
+ * but, in addition, they keep track of new pages allocated for the specified
+ * ASI.
+ */
+
+/*
+ * Return the PTE for @addr in the ASI page-table, populating the PMD
+ * entry with a freshly allocated, zeroed PTE page if it is empty.
+ *
+ * A newly allocated page is registered with asi_add_backend_page() so
+ * the ASI keeps track of pages backing its page-table (see commit
+ * message: pages are tracked so they can be found/freed later).
+ *
+ * Returns the PTE pointer, or ERR_PTR(-ENOMEM) / ERR_PTR(err) on
+ * failure -- callers must check with IS_ERR(), not for NULL.
+ *
+ * NOTE(review): no lock is taken between pmd_none() and set_pmd_safe();
+ * presumably the caller serializes page-table population -- confirm.
+ */
+static pte_t *asi_pte_alloc(struct asi *asi, pmd_t *pmd, unsigned long addr)
+{
+ struct page *page;
+ pte_t *pte;
+ int err;
+
+ if (pmd_none(*pmd)) {
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+ pte = (pte_t *)page_address(page);
+ /* Track the new PTE page; on failure, undo the allocation. */
+ err = asi_add_backend_page(asi, pte, PGT_LEVEL_PTE);
+ if (err) {
+ free_page((unsigned long)pte);
+ return ERR_PTR(err);
+ }
+ /* Install the new PTE page into the (empty) PMD entry. */
+ set_pmd_safe(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
+ /*
+ * The entry was just installed by us, so the generic lookup is
+ * usable here; pre-existing entries go through the ASI-aware
+ * asi_pte_offset() below instead.
+ * NOTE(review): pte_offset_map() without a matching pte_unmap()
+ * -- fine on x86_64 (no highmem PTE kmap), verify intent.
+ */
+ pte = pte_offset_map(pmd, addr);
+ } else {
+ pte = asi_pte_offset(asi, pmd, addr);
+ }
+
+ return pte;
+}
+
+/*
+ * Return the PMD for @addr in the ASI page-table, populating the PUD
+ * entry with a freshly allocated, zeroed PMD page if it is empty.
+ *
+ * The new page is registered with asi_add_backend_page() so the ASI
+ * tracks the pages backing its page-table.
+ *
+ * Returns the PMD pointer, or ERR_PTR(-ENOMEM) / ERR_PTR(err) on
+ * failure -- callers must check with IS_ERR(), not for NULL.
+ *
+ * NOTE(review): pud_none()/set_pud_safe() pair is not locked here;
+ * presumably the caller serializes page-table population -- confirm.
+ */
+static pmd_t *asi_pmd_alloc(struct asi *asi, pud_t *pud, unsigned long addr)
+{
+ struct page *page;
+ pmd_t *pmd;
+ int err;
+
+ if (pud_none(*pud)) {
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+ pmd = (pmd_t *)page_address(page);
+ /* Track the new PMD page; on failure, undo the allocation. */
+ err = asi_add_backend_page(asi, pmd, PGT_LEVEL_PMD);
+ if (err) {
+ free_page((unsigned long)pmd);
+ return ERR_PTR(err);
+ }
+ /* Install the new PMD page into the (empty) PUD entry. */
+ set_pud_safe(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
+ /* Entry just installed by us: generic offset lookup is usable. */
+ pmd = pmd_offset(pud, addr);
+ } else {
+ /* Pre-existing entry: go through the ASI-aware lookup. */
+ pmd = asi_pmd_offset(asi, pud, addr);
+ }
+
+ return pmd;
+}
+
+/*
+ * Return the PUD for @addr in the ASI page-table, populating the P4D
+ * entry with a freshly allocated, zeroed PUD page if it is empty.
+ *
+ * The new page is registered with asi_add_backend_page() so the ASI
+ * tracks the pages backing its page-table.
+ *
+ * Returns the PUD pointer, or ERR_PTR(-ENOMEM) / ERR_PTR(err) on
+ * failure -- callers must check with IS_ERR(), not for NULL.
+ *
+ * NOTE(review): p4d_none()/set_p4d_safe() pair is not locked here;
+ * presumably the caller serializes page-table population -- confirm.
+ */
+static pud_t *asi_pud_alloc(struct asi *asi, p4d_t *p4d, unsigned long addr)
+{
+ struct page *page;
+ pud_t *pud;
+ int err;
+
+ if (p4d_none(*p4d)) {
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+ pud = (pud_t *)page_address(page);
+ /* Track the new PUD page; on failure, undo the allocation. */
+ err = asi_add_backend_page(asi, pud, PGT_LEVEL_PUD);
+ if (err) {
+ free_page((unsigned long)pud);
+ return ERR_PTR(err);
+ }
+ /* Install the new PUD page into the (empty) P4D entry. */
+ set_p4d_safe(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
+ /* Entry just installed by us: generic offset lookup is usable. */
+ pud = pud_offset(p4d, addr);
+ } else {
+ /* Pre-existing entry: go through the ASI-aware lookup. */
+ pud = asi_pud_offset(asi, p4d, addr);
+ }
+
+ return pud;
+}
+
+/*
+ * Return the P4D for @addr in the ASI page-table, populating the PGD
+ * entry with a freshly allocated, zeroed P4D page if it is empty.
+ *
+ * With 4-level paging (!pgtable_l5_enabled()) the p4d level is folded
+ * into the pgd, so the pgd pointer itself is returned and no page is
+ * allocated at this level.
+ *
+ * The new page is registered with asi_add_backend_page() so the ASI
+ * tracks the pages backing its page-table.
+ *
+ * Returns the P4D pointer, or ERR_PTR(-ENOMEM) / ERR_PTR(err) on
+ * failure -- callers must check with IS_ERR(), not for NULL.
+ *
+ * NOTE(review): pgd_none()/set_pgd_safe() pair is not locked here;
+ * presumably the caller serializes page-table population -- confirm.
+ */
+static p4d_t *asi_p4d_alloc(struct asi *asi, pgd_t *pgd, unsigned long addr)
+{
+ struct page *page;
+ p4d_t *p4d;
+ int err;
+
+ if (!pgtable_l5_enabled())
+ return (p4d_t *)pgd;
+
+ if (pgd_none(*pgd)) {
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+ p4d = (p4d_t *)page_address(page);
+ /* Track the new P4D page; on failure, undo the allocation. */
+ err = asi_add_backend_page(asi, p4d, PGT_LEVEL_P4D);
+ if (err) {
+ free_page((unsigned long)p4d);
+ return ERR_PTR(err);
+ }
+ /* Install the new P4D page into the (empty) PGD entry. */
+ set_pgd_safe(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
+ /* Entry just installed by us: generic offset lookup is usable. */
+ p4d = p4d_offset(pgd, addr);
+ } else {
+ /* Pre-existing entry: go through the ASI-aware lookup. */
+ p4d = asi_p4d_offset(asi, pgd, addr);
+ }
+
+ return p4d;
+}
--
1.7.1
Powered by blists - more mailing lists