Message-Id: <20201017175436.17116-1-lihaiwei.kernel@gmail.com>
Date:   Sun, 18 Oct 2020 01:54:36 +0800
From:   lihaiwei.kernel@...il.com
To:     kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc:     pbonzini@...hat.com, sean.j.christopherson@...el.com,
        vkuznets@...hat.com, wanpengli@...cent.com, jmattson@...gle.com,
        joro@...tes.org, Haiwei Li <lihaiwei@...cent.com>
Subject: [PATCH v4] KVM: Check the allocation of pv cpu mask

From: Haiwei Li <lihaiwei@...cent.com>

Check the allocation of the per-cpu __pv_cpu_mask. Set
'send_IPI_mask_allbutself' only when the allocation succeeds, and check
the allocation of __pv_cpu_mask in 'kvm_flush_tlb_others', falling back
to the native flush when the mask is missing.
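For context, the fallback added to 'kvm_flush_tlb_others' boils down to
the following check (a simplified excerpt of the hunk below, shown on
its own for readability):

	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

	/* The per-cpu mask may have failed to allocate at boot. */
	if (unlikely(!flushmask)) {
		/* Use the non-PV flush rather than dereference NULL. */
		native_flush_tlb_others(cpumask, info);
		return;
	}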

Suggested-by: Vitaly Kuznetsov <vkuznets@...hat.com>
Signed-off-by: Haiwei Li <lihaiwei@...cent.com>
---
v1 -> v2:
 * add CONFIG_SMP for kvm_send_ipi_mask_allbutself to prevent build error
v2 -> v3:
 * always check the allocation of __pv_cpu_mask in kvm_flush_tlb_others
v3 -> v4:
 * move kvm_setup_pv_ipi to kvm_alloc_cpumask and get rid of kvm_apic_init

 arch/x86/kernel/kvm.c | 53 +++++++++++++++++++++++++++++--------------
 1 file changed, 36 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 42c6e0deff9e..be28203cc098 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -547,16 +547,6 @@ static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 	__send_ipi_mask(local_mask, vector);
 }
 
-/*
- * Set the IPI entry points
- */
-static void kvm_setup_pv_ipi(void)
-{
-	apic->send_IPI_mask = kvm_send_ipi_mask;
-	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
-	pr_info("setup PV IPIs\n");
-}
-
 static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
 {
 	int cpu;
@@ -619,6 +609,11 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
 	struct kvm_steal_time *src;
 	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 
+	if (unlikely(!flushmask)) {
+		native_flush_tlb_others(cpumask, info);
+		return;
+	}
+
 	cpumask_copy(flushmask, cpumask);
 	/*
 	 * We have to call flush only on online vCPUs. And
@@ -732,10 +727,6 @@ static uint32_t __init kvm_detect(void)
 
 static void __init kvm_apic_init(void)
 {
-#if defined(CONFIG_SMP)
-	if (pv_ipi_supported())
-		kvm_setup_pv_ipi();
-#endif
 }
 
 static void __init kvm_init_platform(void)
@@ -765,10 +756,18 @@ static __init int activate_jump_labels(void)
 }
 arch_initcall(activate_jump_labels);
 
+static void kvm_free_cpumask(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu)
+		free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
+}
+
 static __init int kvm_alloc_cpumask(void)
 {
 	int cpu;
-	bool alloc = false;
+	bool alloc = false, alloced = true;
 
 	if (!kvm_para_available() || nopv)
 		return 0;
@@ -783,10 +782,30 @@ static __init int kvm_alloc_cpumask(void)
 
 	if (alloc)
 		for_each_possible_cpu(cpu) {
-			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
-				GFP_KERNEL, cpu_to_node(cpu));
+			if (!zalloc_cpumask_var_node(
+				per_cpu_ptr(&__pv_cpu_mask, cpu),
+				GFP_KERNEL, cpu_to_node(cpu))) {
+				alloced = false;
+				break;
+			}
 		}
 
+#if defined(CONFIG_SMP)
+	/* Set the IPI entry points */
+	if (pv_ipi_supported()) {
+		apic->send_IPI_mask = kvm_send_ipi_mask;
+		if (alloced)
+			apic->send_IPI_mask_allbutself =
+				kvm_send_ipi_mask_allbutself;
+		pr_info("setup PV IPIs\n");
+	}
+#endif
+
+	if (!alloced) {
+		kvm_free_cpumask();
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 arch_initcall(kvm_alloc_cpumask);
-- 
2.18.4
