Message-Id: <20220830194132.962932-6-oliver.upton@linux.dev>
Date: Tue, 30 Aug 2022 19:41:23 +0000
From: Oliver Upton <oliver.upton@...ux.dev>
To: Marc Zyngier <maz@...nel.org>, James Morse <james.morse@....com>,
Alexandru Elisei <alexandru.elisei@....com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
Quentin Perret <qperret@...gle.com>,
Ricardo Koller <ricarkol@...gle.com>,
Reiji Watanabe <reijiw@...gle.com>,
David Matlack <dmatlack@...gle.com>,
Ben Gardon <bgardon@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Gavin Shan <gshan@...hat.com>, Peter Xu <peterx@...hat.com>,
Sean Christopherson <seanjc@...gle.com>,
Oliver Upton <oliver.upton@...ux.dev>
Cc: linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.cs.columbia.edu,
kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH 05/14] KVM: arm64: Split init and set for table PTE
Create a helper that initializes a table PTE and have the callers install
it directly with smp_store_release().
A subsequent change to KVM will tweak the way we traverse the page
tables, requiring that the visitor callbacks steer the walker down a
newly installed table. Furthermore, when stage-2 faults are serviced
in parallel the PTE must be considered volatile, so walkers will need
to stash a pointer to the new table.
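Concretely, both the hyp and stage-2 map walkers end up with the two-step
pattern below: build the descriptor, then publish it (names taken from the
diff below; the ordering comment is editorial):

	new = kvm_init_table_pte(childp, mm_ops);
	mm_ops->get_page(ptep);
	/* the release store orders table init before the PTE becomes visible */
	smp_store_release(ptep, new);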
Signed-off-by: Oliver Upton <oliver.upton@...ux.dev>
---
arch/arm64/kvm/hyp/pgtable.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 430753fbb727..331f6e3b2c20 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -142,16 +142,13 @@ static void kvm_clear_pte(kvm_pte_t *ptep)
 	WRITE_ONCE(*ptep, 0);
 }
 
-static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp,
-			      struct kvm_pgtable_mm_ops *mm_ops)
+static kvm_pte_t kvm_init_table_pte(kvm_pte_t *childp, struct kvm_pgtable_mm_ops *mm_ops)
 {
-	kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));
+	kvm_pte_t pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));
 
 	pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
 	pte |= KVM_PTE_VALID;
-
-	WARN_ON(kvm_pte_valid(old));
-	smp_store_release(ptep, pte);
+	return pte;
 }
 
 static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level)
@@ -413,7 +410,7 @@ static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *pte
 static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, kvm_pte_t *old,
 			  enum kvm_pgtable_walk_flags flag, void * const arg)
 {
-	kvm_pte_t *childp;
+	kvm_pte_t *childp, new;
 	struct hyp_map_data *data = arg;
 	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
 
@@ -427,8 +424,10 @@ static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, kvm_pte
 	if (!childp)
 		return -ENOMEM;
 
-	kvm_set_table_pte(ptep, childp, mm_ops);
+	new = kvm_init_table_pte(childp, mm_ops);
 	mm_ops->get_page(ptep);
+	smp_store_release(ptep, new);
+
 	return 0;
 }
 
@@ -804,7 +803,7 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 				kvm_pte_t *old, struct stage2_map_data *data)
 {
 	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
-	kvm_pte_t *childp, pte = *old;
+	kvm_pte_t *childp, pte = *old, new;
 	int ret;
 
 	ret = stage2_map_walker_try_leaf(addr, end, level, ptep, pte, data);
@@ -830,8 +829,9 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	if (stage2_pte_is_counted(pte))
 		stage2_put_pte(ptep, data->mmu, addr, level, mm_ops);
 
-	kvm_set_table_pte(ptep, childp, mm_ops);
+	new = kvm_init_table_pte(childp, mm_ops);
 	mm_ops->get_page(ptep);
+	smp_store_release(ptep, new);
 
 	return 0;
 }
--
2.37.2.672.g94769d06f0-goog