Message-ID: <20240213093250.3960069-13-oliver.upton@linux.dev>
Date: Tue, 13 Feb 2024 09:32:49 +0000
From: Oliver Upton <oliver.upton@...ux.dev>
To: kvmarm@...ts.linux.dev
Cc: kvm@...r.kernel.org,
	Marc Zyngier <maz@...nel.org>,
	James Morse <james.morse@....com>,
	Suzuki K Poulose <suzuki.poulose@....com>,
	Zenghui Yu <yuzenghui@...wei.com>,
	linux-kernel@...r.kernel.org,
	Oliver Upton <oliver.upton@...ux.dev>
Subject: [PATCH v2 12/23] KVM: arm64: vgic-its: Lazily allocate LPI translation cache

Reusing translation cache entries within a read-side critical section is
fundamentally incompatible with an rculist. As such, we need to allocate
a new entry to replace an eviction and free the removed entry
afterwards.

Take this as an opportunity to remove the eager allocation of
translation cache entries altogether in favor of a lazy allocation model
on cache miss.

Signed-off-by: Oliver Upton <oliver.upton@...ux.dev>
---
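Not for the changelog: a minimal sketch of the constraint described above,
using a hypothetical 'entry' type rather than the vgic code. Once lookups
traverse the list under rcu_read_lock(), a node a reader may still observe
must stay immutable, so eviction has to unlink the old node, publish a
freshly allocated replacement, and defer the free until after a grace
period:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct entry {
	struct list_head node;
	u32 key, val;
	struct rcu_head rcu;	/* for the deferred free */
};

static LIST_HEAD(cache);
static DEFINE_SPINLOCK(cache_lock);

/* Read side: lockless traversal; a node a reader holds must not change. */
static bool cache_lookup(u32 key, u32 *val)
{
	struct entry *e;
	bool hit = false;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &cache, node) {
		if (e->key == key) {
			*val = e->val;
			hit = true;
			break;
		}
	}
	rcu_read_unlock();
	return hit;
}

/*
 * Write side: recycling 'victim' in place would let a concurrent reader
 * observe a half-rewritten key/val pair. Unlink it, publish a freshly
 * allocated replacement, and free the victim after a grace period.
 */
static int cache_replace(struct entry *victim, u32 key, u32 val)
{
	struct entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (!new)
		return -ENOMEM;

	new->key = key;
	new->val = val;

	spin_lock(&cache_lock);
	list_del_rcu(&victim->node);
	list_add_rcu(&new->node, &cache);
	spin_unlock(&cache_lock);

	kfree_rcu(victim, rcu);
	return 0;
}
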
 arch/arm64/kvm/vgic/vgic-init.c |  3 --
 arch/arm64/kvm/vgic/vgic-its.c  | 96 +++++++++++++++------------------
 include/kvm/arm_vgic.h          |  1 +
 3 files changed, 45 insertions(+), 55 deletions(-)
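
Also not for the changelog: the new miss path, condensed into a standalone,
hypothetical cache so the ordering is easier to follow. lookup_locked() and
cache_budget() are stand-ins for __vgic_its_check_cache() and
vgic_its_max_cache_size(); the rest mirrors the shape of the diff below:
allocate before taking the raw spinlock (kzalloc may sleep), re-check for a
racing insert, unlink any victim under the lock, and free it only after the
lock is dropped:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct entry {
	struct list_head node;
	u32 key, val;
};

static LIST_HEAD(cache);
static DEFINE_RAW_SPINLOCK(cache_lock);
static unsigned int cache_count;

/* Stand-in for __vgic_its_check_cache(); must be called under the lock. */
static bool lookup_locked(u32 key)
{
	struct entry *e;

	list_for_each_entry(e, &cache, node)
		if (e->key == key)
			return true;
	return false;
}

/* Stand-in for vgic_its_max_cache_size() (16 per online vCPU in the patch). */
static unsigned int cache_budget(void)
{
	return 64;
}

static void cache_insert(u32 key, u32 val)
{
	struct entry *new, *victim = NULL;
	unsigned long flags;

	/* kzalloc() may sleep, so allocate before taking the raw spinlock. */
	new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
	if (!new)
		return;		/* a failed allocation just leaves the miss uncached */

	raw_spin_lock_irqsave(&cache_lock, flags);

	/* Another CPU may have cached the same key while we allocated. */
	if (lookup_locked(key)) {
		kfree(new);
		goto out;
	}

	/* At capacity: unlink the LRU tail now, free it outside the lock. */
	if (cache_count >= cache_budget()) {
		victim = list_last_entry(&cache, typeof(*victim), node);
		list_del(&victim->node);
		cache_count--;
	}

	new->key = key;
	new->val = val;
	list_add(&new->node, &cache);	/* MRU entries live at the head */
	cache_count++;

out:
	raw_spin_unlock_irqrestore(&cache_lock, flags);
	kfree(victim);	/* kfree(NULL) is a no-op */
}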

diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index e25672d6e846..660d5ce3b610 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -305,9 +305,6 @@ int vgic_init(struct kvm *kvm)
 		}
 	}
 
-	if (vgic_has_its(kvm))
-		vgic_lpi_translation_cache_init(kvm);
-
 	/*
 	 * If we have GICv4.1 enabled, unconditionnaly request enable the
 	 * v4 support so that we get HW-accelerated vSGIs. Otherwise, only
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 50a9addebeed..a7ba20b57264 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -611,12 +611,20 @@ static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
 	return irq;
 }
 
+/* Default is 16 cached LPIs per vcpu */
+#define LPI_DEFAULT_PCPU_CACHE_SIZE	16
+
+static unsigned int vgic_its_max_cache_size(struct kvm *kvm)
+{
+	return atomic_read(&kvm->online_vcpus) * LPI_DEFAULT_PCPU_CACHE_SIZE;
+}
+
 static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
 				       u32 devid, u32 eventid,
 				       struct vgic_irq *irq)
 {
+	struct vgic_translation_cache_entry *new, *victim = NULL;
 	struct vgic_dist *dist = &kvm->arch.vgic;
-	struct vgic_translation_cache_entry *cte;
 	unsigned long flags;
 	phys_addr_t db;
 
@@ -624,10 +632,11 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
 	if (irq->hw)
 		return;
 
-	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+	new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
+	if (!new)
+		return;
 
-	if (unlikely(list_empty(&dist->lpi_translation_cache)))
-		goto out;
+	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
 
 	/*
 	 * We could have raced with another CPU caching the same
@@ -635,22 +644,17 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
 	 * already
 	 */
 	db = its->vgic_its_base + GITS_TRANSLATER;
-	if (__vgic_its_check_cache(dist, db, devid, eventid))
+	if (__vgic_its_check_cache(dist, db, devid, eventid)) {
+		kfree(new);
 		goto out;
+	}
 
-	/* Always reuse the last entry (LRU policy) */
-	cte = list_last_entry(&dist->lpi_translation_cache,
-			      typeof(*cte), entry);
-
-	/*
-	 * Caching the translation implies having an extra reference
-	 * to the interrupt, so drop the potential reference on what
-	 * was in the cache, and increment it on the new interrupt.
-	 */
-	if (cte->irq) {
-		KVM_VM_TRACE_EVENT(kvm, vgic_its_trans_cache_victim, cte->db,
-				   cte->devid, cte->eventid, cte->irq->intid);
-		vgic_put_irq(kvm, cte->irq);
+	if (dist->lpi_cache_count >= vgic_its_max_cache_size(kvm)) {
+		/* Always reuse the last entry (LRU policy) */
+		victim = list_last_entry(&dist->lpi_translation_cache,
+					 typeof(*victim), entry);
+		list_del(&victim->entry);
+		dist->lpi_cache_count--;
 	}
 
 	/*
@@ -660,16 +664,33 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
 	lockdep_assert_held(&its->its_lock);
 	vgic_get_irq_kref(irq);
 
-	cte->db		= db;
-	cte->devid	= devid;
-	cte->eventid	= eventid;
-	cte->irq	= irq;
+	new->db		= db;
+	new->devid	= devid;
+	new->eventid	= eventid;
+	new->irq	= irq;
 
 	/* Move the new translation to the head of the list */
-	list_move(&cte->entry, &dist->lpi_translation_cache);
+	list_add(&new->entry, &dist->lpi_translation_cache);
+	dist->lpi_cache_count++;
 
 out:
 	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+
+	if (!victim)
+		return;
+
+	/*
+	 * Caching the translation implies having an extra reference
+	 * to the interrupt, so drop the potential reference on what
+	 * was in the cache, and increment it on the new interrupt.
+	 */
+	if (victim->irq) {
+		KVM_VM_TRACE_EVENT(kvm, vgic_its_trans_cache_victim, victim->db,
+				   victim->devid, victim->eventid, victim->irq->intid);
+		vgic_put_irq(kvm, victim->irq);
+	}
+
+	kfree(victim);
 }
 
 void vgic_its_invalidate_cache(struct kvm *kvm)
@@ -1917,33 +1938,6 @@ static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
 	return ret;
 }
 
-/* Default is 16 cached LPIs per vcpu */
-#define LPI_DEFAULT_PCPU_CACHE_SIZE	16
-
-void vgic_lpi_translation_cache_init(struct kvm *kvm)
-{
-	struct vgic_dist *dist = &kvm->arch.vgic;
-	unsigned int sz;
-	int i;
-
-	if (!list_empty(&dist->lpi_translation_cache))
-		return;
-
-	sz = atomic_read(&kvm->online_vcpus) * LPI_DEFAULT_PCPU_CACHE_SIZE;
-
-	for (i = 0; i < sz; i++) {
-		struct vgic_translation_cache_entry *cte;
-
-		/* An allocation failure is not fatal */
-		cte = kzalloc(sizeof(*cte), GFP_KERNEL_ACCOUNT);
-		if (WARN_ON(!cte))
-			break;
-
-		INIT_LIST_HEAD(&cte->entry);
-		list_add(&cte->entry, &dist->lpi_translation_cache);
-	}
-}
-
 void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
@@ -1990,8 +1984,6 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
 			kfree(its);
 			return ret;
 		}
-
-		vgic_lpi_translation_cache_init(dev->kvm);
 	}
 
 	mutex_init(&its->its_lock);
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 47035946648e..431d05c01a53 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -281,6 +281,7 @@ struct vgic_dist {
 
 	/* LPI translation cache */
 	struct list_head	lpi_translation_cache;
+	unsigned int		lpi_cache_count;
 
 	/* used by vgic-debug */
 	struct vgic_state_iter *iter;
-- 
2.43.0.687.g38aa6559b0-goog