Message-Id: <1557758315-12667-13-git-send-email-alexandre.chartre@oracle.com>
Date: Mon, 13 May 2019 16:38:20 +0200
From: Alexandre Chartre <alexandre.chartre@...cle.com>
To: pbonzini@...hat.com, rkrcmar@...hat.com, tglx@...utronix.de,
mingo@...hat.com, bp@...en8.de, hpa@...or.com,
dave.hansen@...ux.intel.com, luto@...nel.org, peterz@...radead.org,
kvm@...r.kernel.org, x86@...nel.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Cc: konrad.wilk@...cle.com, jan.setjeeilers@...cle.com,
liran.alon@...cle.com, jwadams@...gle.com,
alexandre.chartre@...cle.com
Subject: [RFC KVM 12/27] kvm/isolation: add KVM page table entry allocation functions
These functions allocate p4d/pud/pmd/pte pages and ensure that
the allocated pages are part of the KVM page table.
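
For illustration, a caller mapping an address would typically chain
these helpers from the PGD down to the PTE level. The sketch below is
hypothetical and not part of this patch (kvm_pgt_walk_alloc does not
exist in the series); it only shows the intended calling pattern, with
errors propagated as ERR_PTR() values:

    static pte_t *kvm_pgt_walk_alloc(struct mm_struct *mm, pgd_t *pgd,
                                     unsigned long addr)
    {
            p4d_t *p4d;
            pud_t *pud;
            pmd_t *pmd;

            /* Allocate any missing level; each helper returns an
             * ERR_PTR() value on failure.
             */
            p4d = kvm_p4d_alloc(mm, pgd, addr);
            if (IS_ERR(p4d))
                    return ERR_CAST(p4d);
            pud = kvm_pud_alloc(mm, p4d, addr);
            if (IS_ERR(pud))
                    return ERR_CAST(pud);
            pmd = kvm_pmd_alloc(mm, pud, addr);
            if (IS_ERR(pmd))
                    return ERR_CAST(pmd);
            return kvm_pte_alloc(mm, pmd, addr);
    }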
Signed-off-by: Alexandre Chartre <alexandre.chartre@...cle.com>
---
arch/x86/kvm/isolation.c | 94 ++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 94 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/isolation.c b/arch/x86/kvm/isolation.c
index b29a09b..6ec86df 100644
--- a/arch/x86/kvm/isolation.c
+++ b/arch/x86/kvm/isolation.c
@@ -248,6 +248,100 @@ static inline void kvm_p4d_free(struct mm_struct *mm, p4d_t *p4d)
         p4d_free(mm, PGTD_ALIGN(p4d));
 }
 
+/*
+ * The kvm_pXX_alloc() functions are equivalent to the kernel pXX_alloc()
+ * functions but, in addition, they ensure that the page table pages they
+ * allocate are part of the KVM page table. On failure, they return an
+ * ERR_PTR() value instead of NULL.
+ */
+
+static pte_t *kvm_pte_alloc(struct mm_struct *mm, pmd_t *pmd,
+                            unsigned long addr)
+{
+        pte_t *pte;
+
+        if (pmd_none(*pmd)) {
+                pte = pte_alloc_kernel(pmd, addr);
+                if (!pte) {
+                        pr_debug("PTE: ERR ALLOC\n");
+                        return ERR_PTR(-ENOMEM);
+                }
+                if (!kvm_add_pgt_directory(pte, PGT_LEVEL_PTE)) {
+                        kvm_pte_free(mm, pte);
+                        return ERR_PTR(-EINVAL);
+                }
+        } else {
+                pte = kvm_pte_offset(pmd, addr);
+        }
+
+        return pte;
+}
+
+static pmd_t *kvm_pmd_alloc(struct mm_struct *mm, pud_t *pud,
+                            unsigned long addr)
+{
+        pmd_t *pmd;
+
+        if (pud_none(*pud)) {
+                pmd = pmd_alloc(mm, pud, addr);
+                if (!pmd) {
+                        pr_debug("PMD: ERR ALLOC\n");
+                        return ERR_PTR(-ENOMEM);
+                }
+                if (!kvm_add_pgt_directory(pmd, PGT_LEVEL_PMD)) {
+                        kvm_pmd_free(mm, pmd);
+                        return ERR_PTR(-EINVAL);
+                }
+        } else {
+                pmd = kvm_pmd_offset(pud, addr);
+        }
+
+        return pmd;
+}
+
+static pud_t *kvm_pud_alloc(struct mm_struct *mm, p4d_t *p4d,
+                            unsigned long addr)
+{
+        pud_t *pud;
+
+        if (p4d_none(*p4d)) {
+                pud = pud_alloc(mm, p4d, addr);
+                if (!pud) {
+                        pr_debug("PUD: ERR ALLOC\n");
+                        return ERR_PTR(-ENOMEM);
+                }
+                if (!kvm_add_pgt_directory(pud, PGT_LEVEL_PUD)) {
+                        kvm_pud_free(mm, pud);
+                        return ERR_PTR(-EINVAL);
+                }
+        } else {
+                pud = kvm_pud_offset(p4d, addr);
+        }
+
+        return pud;
+}
+
+static p4d_t *kvm_p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
+                            unsigned long addr)
+{
+        p4d_t *p4d;
+
+        if (pgd_none(*pgd)) {
+                p4d = p4d_alloc(mm, pgd, addr);
+                if (!p4d) {
+                        pr_debug("P4D: ERR ALLOC\n");
+                        return ERR_PTR(-ENOMEM);
+                }
+                if (!kvm_add_pgt_directory(p4d, PGT_LEVEL_P4D)) {
+                        kvm_p4d_free(mm, p4d);
+                        return ERR_PTR(-EINVAL);
+                }
+        } else {
+                p4d = kvm_p4d_offset(pgd, addr);
+        }
+
+        return p4d;
+}
+
 static int kvm_isolation_init_mm(void)
 {
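
Note that, unlike the kernel pXX_alloc() helpers which return NULL on
failure, these functions return ERR_PTR() values, so call sites should
check with IS_ERR()/PTR_ERR() rather than a NULL test, along these
lines (hypothetical call site, not from this patch):

    pmd = kvm_pmd_alloc(mm, pud, addr);
    if (IS_ERR(pmd))
            return PTR_ERR(pmd);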
--
1.7.1