[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251106161117.350395-10-imbrenda@linux.ibm.com>
Date: Thu, 6 Nov 2025 17:11:03 +0100
From: Claudio Imbrenda <imbrenda@...ux.ibm.com>
To: kvm@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, linux-s390@...r.kernel.org,
borntraeger@...ibm.com, frankja@...ux.ibm.com, nsg@...ux.ibm.com,
nrb@...ux.ibm.com, seiden@...ux.ibm.com, schlameuss@...ux.ibm.com,
hca@...ux.ibm.com, svens@...ux.ibm.com, agordeev@...ux.ibm.com,
gor@...ux.ibm.com, david@...hat.com, gerald.schaefer@...ux.ibm.com
Subject: [PATCH v3 09/23] KVM: s390: KVM page table management functions: clear and replace
Add page table management functions to be used for KVM guest (gmap)
page tables.
Add functions to clear, replace or exchange DAT table entries.
Signed-off-by: Claudio Imbrenda <imbrenda@...ux.ibm.com>
---
arch/s390/kvm/dat.c | 118 ++++++++++++++++++++++++++++++++++++++++++++
arch/s390/kvm/dat.h | 40 +++++++++++++++
2 files changed, 158 insertions(+)
diff --git a/arch/s390/kvm/dat.c b/arch/s390/kvm/dat.c
index c324a27f379f..a9d5b49ac411 100644
--- a/arch/s390/kvm/dat.c
+++ b/arch/s390/kvm/dat.c
@@ -101,3 +101,121 @@ void dat_free_level(struct crst_table *table, bool owns_ptes)
}
dat_free_crst(table);
}
+
+/**
+ * dat_crstep_xchg - exchange a gmap CRSTE with another
+ * @crstep: pointer to the CRST entry
+ * @new: replacement entry
+ * @gfn: the affected guest frame number
+ * @asce: the ASCE of the address space
+ *
+ * Replace a region- or segment-table entry, flushing the TLB when the old
+ * entry was valid: with EDAT2 the replacement and the flush are done in a
+ * single operation (CRDTE), otherwise the old entry is invalidated and
+ * flushed (IDTE) before the new value is stored.
+ *
+ * Context: This function is assumed to be called with kvm->mmu_lock held.
+ */
+void dat_crstep_xchg(union crste *crstep, union crste new, gfn_t gfn, union asce asce)
+{
+	/* An invalid entry cannot be cached in the TLB, a plain store suffices. */
+	if (crstep->h.i) {
+		WRITE_ONCE(*crstep, new);
+		return;
+	} else if (cpu_has_edat2()) {
+		/* CRDTE replaces the entry and flushes the TLB in one operation. */
+		crdte_crste(crstep, *crstep, new, gfn, asce);
+		return;
+	}
+
+	/* No EDAT2: invalidate and flush the old entry first, then store. */
+	if (machine_has_tlb_guest())
+		idte_crste(crstep, gfn, IDTE_GUEST_ASCE, asce, IDTE_GLOBAL);
+	else
+		idte_crste(crstep, gfn, 0, NULL_ASCE, IDTE_GLOBAL);
+	WRITE_ONCE(*crstep, new);
+}
+
+/**
+ * dat_crstep_xchg_atomic - atomically exchange a gmap CRSTE with another
+ * @crstep: pointer to the CRST entry
+ * @old: expected old value
+ * @new: replacement entry
+ * @gfn: the affected guest frame number
+ * @asce: the ASCE of the address space
+ *
+ * This function should only be called on invalid crstes, or on crstes with
+ * FC = 1, as that guarantees the presence of CSPG.
+ *
+ * This function is needed to atomically exchange a CRSTE that potentially
+ * maps a prefix area, without having to invalidate it in between.
+ *
+ * Context: This function is assumed to be called with kvm->mmu_lock held.
+ *
+ * Return: true if the exchange was successful.
+ */
+bool dat_crstep_xchg_atomic(union crste *crstep, union crste old, union crste new, gfn_t gfn,
+			    union asce asce)
+{
+	/* An invalid entry cannot be cached in the TLB, cmpxchg is enough. */
+	if (old.h.i)
+		return arch_try_cmpxchg((long *)crstep, &old.val, new.val);
+	if (cpu_has_edat2())
+		return crdte_crste(crstep, old, new, gfn, asce);
+	/* Without EDAT2, fall back to CSPG (hence the FC = 1 requirement). */
+	return cspg_crste(crstep, old, new);
+}
+
+/*
+ * Write the storage key of the backing page from the ACC/FP bits saved in
+ * the PGSTE.  Referenced and Changed are deliberately zero in the new key.
+ */
+static void dat_set_storage_key_from_pgste(union pte pte, union pgste pgste)
+{
+	union skey nkey = { .acc = pgste.acc, .fp = pgste.fp };
+
+	page_set_storage_key(pte_origin(pte), nkey.skey, 0);
+}
+
+/* Copy the storage key of the old backing page over to the new backing page. */
+static void dat_move_storage_key(union pte old, union pte new)
+{
+	page_set_storage_key(pte_origin(new), page_get_storage_key(pte_origin(old)), 1);
+}
+
+/*
+ * Save the storage key of the backing page into the PGSTE.  ACC and FP are
+ * copied, while the guest views of Referenced and Changed are accumulated
+ * (OR-ed into the PGSTE), never cleared here.
+ */
+static union pgste dat_save_storage_key_into_pgste(union pte pte, union pgste pgste)
+{
+	union skey skey;
+
+	skey.skey = page_get_storage_key(pte_origin(pte));
+
+	pgste.acc = skey.acc;
+	pgste.fp = skey.fp;
+	pgste.gr |= skey.r;
+	pgste.gc |= skey.c;
+
+	return pgste;
+}
+
+/**
+ * __dat_ptep_xchg - exchange a gmap PTE, with the PGSTE lock already held
+ * @ptep: pointer to the PTE
+ * @pgste: the (locked) PGSTE belonging to @ptep
+ * @new: replacement entry
+ * @gfn: the affected guest frame number
+ * @asce: the ASCE of the address space
+ * @uses_skeys: whether guest storage keys need to be kept in sync
+ *
+ * Context: called with kvm->mmu_lock held and the PGSTE of @ptep locked.
+ *
+ * Return: the possibly updated PGSTE; the caller writes it back on unlock.
+ */
+union pgste __dat_ptep_xchg(union pte *ptep, union pgste pgste, union pte new, gfn_t gfn,
+			    union asce asce, bool uses_skeys)
+{
+	union pte old = READ_ONCE(*ptep);
+
+	/* Updating only the software bits while holding the pgste lock */
+	/* NOTE(review): re-reads *ptep here instead of using 'old' — confirm intended */
+	if (!((ptep->val ^ new.val) & ~_PAGE_SW_BITS)) {
+		WRITE_ONCE(ptep->swbyte, new.swbyte);
+		return pgste;
+	}
+
+	/* The old entry is valid, it may be cached in the TLB: flush it first. */
+	if (!old.h.i) {
+		unsigned long opts = IPTE_GUEST_ASCE | (pgste.nodat ? IPTE_NODAT : 0);
+
+		if (machine_has_tlb_guest())
+			__ptep_ipte(gfn_to_gpa(gfn), (void *)ptep, opts, asce.val, IPTE_GLOBAL);
+		else
+			__ptep_ipte(gfn_to_gpa(gfn), (void *)ptep, 0, 0, IPTE_GLOBAL);
+	}
+
+	if (uses_skeys) {
+		if (old.h.i && !new.h.i)
+			/* Invalid to valid: restore storage keys from PGSTE */
+			dat_set_storage_key_from_pgste(new, pgste);
+		else if (!old.h.i && new.h.i)
+			/* Valid to invalid: save storage keys to PGSTE */
+			pgste = dat_save_storage_key_into_pgste(old, pgste);
+		else if (!old.h.i && !new.h.i)
+			/* Valid to valid: move storage keys (only if the frame changed) */
+			if (old.h.pfra != new.h.pfra)
+				dat_move_storage_key(old, new);
+		/* Invalid to invalid: nothing to do */
+	}
+
+	WRITE_ONCE(*ptep, new);
+	return pgste;
+}
diff --git a/arch/s390/kvm/dat.h b/arch/s390/kvm/dat.h
index 7fcbf80b3858..8bd1f8e0ef91 100644
--- a/arch/s390/kvm/dat.h
+++ b/arch/s390/kvm/dat.h
@@ -431,6 +431,12 @@ struct kvm_s390_mmu_cache {
short int n_rmaps;
};
+union pgste __must_check __dat_ptep_xchg(union pte *ptep, union pgste pgste, union pte new,
+ gfn_t gfn, union asce asce, bool uses_skeys);
+bool dat_crstep_xchg_atomic(union crste *crstep, union crste old, union crste new, gfn_t gfn,
+ union asce asce);
+void dat_crstep_xchg(union crste *crstep, union crste new, gfn_t gfn, union asce asce);
+
void dat_free_level(struct crst_table *table, bool owns_ptes);
struct crst_table *dat_alloc_crst_sleepable(unsigned long init);
@@ -763,6 +769,21 @@ static inline void pgste_set_unlock(union pte *ptep, union pgste pgste)
WRITE_ONCE(*pgste_of(ptep), pgste);
}
+/*
+ * Exchange a PTE with @new, taking and releasing the PGSTE lock around the
+ * update; convenience wrapper for callers not interested in the PGSTE.
+ */
+static inline void dat_ptep_xchg(union pte *ptep, union pte new, gfn_t gfn, union asce asce,
+				 bool has_skeys)
+{
+	union pgste pgste;
+
+	pgste = pgste_get_lock(ptep);
+	pgste = __dat_ptep_xchg(ptep, pgste, new, gfn, asce, has_skeys);
+	pgste_set_unlock(ptep, pgste);
+}
+
+/* Clear a PTE by exchanging it with the empty (invalid) PTE value. */
+static inline void dat_ptep_clear(union pte *ptep, gfn_t gfn, union asce asce, bool has_skeys)
+{
+	dat_ptep_xchg(ptep, _PTE_EMPTY, gfn, asce, has_skeys);
+}
+
static inline void dat_free_pt(struct page_table *pt)
{
free_page((unsigned long)pt);
@@ -800,4 +821,23 @@ static inline struct kvm_s390_mmu_cache *kvm_s390_new_mmu_cache(void)
return NULL;
}
+/* Atomically exchange a PMD entry; thin wrapper around dat_crstep_xchg_atomic(). */
+static inline bool dat_pmdp_xchg_atomic(union pmd *pmdp, union pmd old, union pmd new,
+					gfn_t gfn, union asce asce)
+{
+	return dat_crstep_xchg_atomic(_CRSTEP(pmdp), _CRSTE(old), _CRSTE(new), gfn, asce);
+}
+
+/* Atomically exchange a PUD entry; thin wrapper around dat_crstep_xchg_atomic(). */
+static inline bool dat_pudp_xchg_atomic(union pud *pudp, union pud old, union pud new,
+					gfn_t gfn, union asce asce)
+{
+	return dat_crstep_xchg_atomic(_CRSTEP(pudp), _CRSTE(old), _CRSTE(new), gfn, asce);
+}
+
+/* Clear a CRSTE, replacing it with an empty entry of the same table type. */
+static inline void dat_crstep_clear(union crste *crstep, gfn_t gfn, union asce asce)
+{
+	union crste newcrste = _CRSTE_EMPTY(crstep->h.tt);
+
+	dat_crstep_xchg(crstep, newcrste, gfn, asce);
+}
+
#endif /* __KVM_S390_DAT_H */
--
2.51.1
Powered by blists - more mailing lists