Message-ID: <87y2lrnnyf.fsf@vitty.brq.redhat.com>
Date:   Thu, 03 Sep 2020 12:39:52 +0200
From:   Vitaly Kuznetsov <vkuznets@...hat.com>
To:     Haiwei Li <lihaiwei.kernel@...il.com>
Cc:     "pbonzini\@redhat.com" <pbonzini@...hat.com>,
        Sean Christopherson <sean.j.christopherson@...el.com>,
        "wanpengli\@tencent.com" <wanpengli@...cent.com>,
        "jmattson\@google.com" <jmattson@...gle.com>,
        "joro\@8bytes.org" <joro@...tes.org>, tglx@...utronix.de,
        mingo@...hat.com, "bp\@alien8.de" <bp@...en8.de>,
        "hpa\@zytor.com" <hpa@...or.com>,
        "linux-kernel\@vger.kernel.org" <linux-kernel@...r.kernel.org>,
        "kvm\@vger.kernel.org" <kvm@...r.kernel.org>,
        "x86\@kernel.org" <x86@...nel.org>
Subject: Re: [PATCH v2] KVM: Check the allocation of pv cpu mask

Haiwei Li <lihaiwei.kernel@...il.com> writes:

> From: Haiwei Li <lihaiwei@...cent.com>
>
> Check the allocation of per-cpu __pv_cpu_mask. Initialize the ops only
> when the allocation succeeds.
>
> Signed-off-by: Haiwei Li <lihaiwei@...cent.com>
> ---
>   arch/x86/kernel/kvm.c | 24 ++++++++++++++++++++----
>   1 file changed, 20 insertions(+), 4 deletions(-)
>
> diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
> index 08320b0b2b27..d3c062e551d7 100644
> --- a/arch/x86/kernel/kvm.c
> +++ b/arch/x86/kernel/kvm.c
> @@ -555,7 +555,6 @@ static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
>   static void kvm_setup_pv_ipi(void)
>   {
>   	apic->send_IPI_mask = kvm_send_ipi_mask;
> -	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
>   	pr_info("setup PV IPIs\n");
>   }
>
> @@ -654,7 +653,6 @@ static void __init kvm_guest_init(void)
>   	}
>
>   	if (pv_tlb_flush_supported()) {
> -		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
>   		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
>   		pr_info("KVM setup pv remote TLB flush\n");
>   	}
> @@ -767,6 +765,14 @@ static __init int activate_jump_labels(void)
>   }
>   arch_initcall(activate_jump_labels);
>
> +static void kvm_free_pv_cpu_mask(void)
> +{
> +	unsigned int cpu;
> +
> +	for_each_possible_cpu(cpu)
> +		free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
> +}
> +
>   static __init int kvm_alloc_cpumask(void)
>   {
>   	int cpu;
> @@ -785,11 +791,21 @@ static __init int kvm_alloc_cpumask(void)
>
>   	if (alloc)
>   		for_each_possible_cpu(cpu) {
> -			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
> -				GFP_KERNEL, cpu_to_node(cpu));
> +			if (!zalloc_cpumask_var_node(
> +				per_cpu_ptr(&__pv_cpu_mask, cpu),
> +				GFP_KERNEL, cpu_to_node(cpu)))
> +				goto zalloc_cpumask_fail;
>   		}
>
> +#if defined(CONFIG_SMP)
> +	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
> +#endif
> +	pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;

This is too late, I'm afraid. If I'm not mistaken, PV patching happens
earlier, so .init.guest_late_init (kvm_guest_init()) is good and
arch_initcall() is bad.
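
If I remember the boot ordering correctly, it is roughly:

	start_kernel()
	  setup_arch()
	    x86_init.hyper.guest_late_init()  /* -> kvm_guest_init() */
	  ...
	  check_bugs()
	    alternative_instructions()
	      apply_paravirt()                /* pv_ops call sites patched here */
	  arch_call_rest_init()
	    ...                               /* do_initcalls(): arch_initcall()s run here */

so, unless I'm misreading the patching code, the call sites get patched
with whatever is in pv_ops at that point, and updating
pv_ops.mmu.flush_tlb_others from an arch_initcall() afterwards has no
effect on them.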

Have you checked that with this patch kvm_flush_tlb_others() is still
being called?

Actually, there is no need to assign kvm_flush_tlb_others() this late at
all. We can always check whether __pv_cpu_mask was actually allocated and
fall back to the architectural (native) path if it was not.
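
Something like this at the top of kvm_flush_tlb_others() should do the
trick (a rough sketch, not even compile tested; assuming the existing
'flushmask'/'cpumask'/'info' names and that falling back to
native_flush_tlb_others() is what we want):

	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

	/*
	 * kvm_alloc_cpumask() may have failed to allocate the per-cpu mask;
	 * this can only happen with CONFIG_CPUMASK_OFFSTACK=y, otherwise
	 * the check is a nop.
	 */
	if (unlikely(!flushmask)) {
		native_flush_tlb_others(cpumask, info);
		return;
	}

That way the pv_ops.mmu.flush_tlb_others assignment can stay in
kvm_guest_init() where it is today.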

>   	return 0;
> +
> +zalloc_cpumask_fail:
> +	kvm_free_pv_cpu_mask();
> +	return -ENOMEM;
>   }
>   arch_initcall(kvm_alloc_cpumask);
>
> --
> 2.18.4
>

-- 
Vitaly
