Message-ID: <efb9af41-21ed-4b97-8c67-40d6cda10484@redhat.com>
Date: Wed, 14 Aug 2024 20:06:32 +0200
From: Paolo Bonzini <pbonzini@...hat.com>
To: Sean Christopherson <seanjc@...gle.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
Chao Gao <chao.gao@...el.com>, Kai Huang <kai.huang@...el.com>
Subject: Re: [PATCH v3 1/8] KVM: Use dedicated mutex to protect
kvm_usage_count to avoid deadlock
On 6/8/24 02:06, Sean Christopherson wrote:
> Use a dedicated mutex to guard kvm_usage_count to fix a potential deadlock
> on x86 due to a chain of locks and SRCU synchronizations. Translating the
> below lockdep splat, CPU1 #6 will wait on CPU0 #1, CPU0 #8 will wait on
> CPU2 #3, and CPU2 #7 will wait on CPU1 #4 (if there's a writer, due to the
> fairness of r/w semaphores).
>
>     CPU0                    CPU1                      CPU2
> 1   lock(&kvm->slots_lock);
> 2                                                     lock(&vcpu->mutex);
> 3                                                     lock(&kvm->srcu);
> 4                           lock(cpu_hotplug_lock);
> 5                           lock(kvm_lock);
> 6                           lock(&kvm->slots_lock);
> 7                                                     lock(cpu_hotplug_lock);
> 8   sync(&kvm->srcu);
>
> Note, there are likely more potential deadlocks in KVM x86, e.g. the same
> pattern of taking cpu_hotplug_lock outside of kvm_lock likely exists with
> __kvmclock_cpufreq_notifier()
Offhand I couldn't see any places where {,__}cpufreq_driver_target() is
called within cpus_read_lock(). I didn't look too closely though.
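
To make the lock cycle in the quoted splat easier to follow, here is a minimal
userspace analogy (a sketch, not kernel code): the thread and lock names are
borrowed from the splat purely for readability, plain pthread mutexes stand in
for the kernel locks, and a pthread rwlock crudely models SRCU (rdlock() for
srcu_read_lock(), wrlock() for synchronize_srcu()). In the real kernel,
cpu_hotplug_lock is a percpu rwsem, so step #7 only blocks behind step #4 when
a writer is queued, as the commit message notes. Built with "cc -pthread",
this is expected to hang, which is the point:

#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t slots_lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t vcpu_mutex   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t kvm_lock     = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t srcu        = PTHREAD_RWLOCK_INITIALIZER;

static void *cpu0(void *arg)
{
	pthread_mutex_lock(&slots_lock);	/* #1 */
	sleep(1);
	pthread_rwlock_wrlock(&srcu);		/* #8: "synchronize_srcu()", waits on CPU2 #3 */
	return NULL;
}

static void *cpu1(void *arg)
{
	pthread_mutex_lock(&hotplug_lock);	/* #4 */
	pthread_mutex_lock(&kvm_lock);		/* #5 */
	sleep(1);
	pthread_mutex_lock(&slots_lock);	/* #6: waits on CPU0 #1 */
	return NULL;
}

static void *cpu2(void *arg)
{
	pthread_mutex_lock(&vcpu_mutex);	/* #2 */
	pthread_rwlock_rdlock(&srcu);		/* #3: "srcu_read_lock()" */
	sleep(1);
	pthread_mutex_lock(&hotplug_lock);	/* #7: waits on CPU1 #4 */
	return NULL;
}

int main(void)
{
	pthread_t t[3];

	pthread_create(&t[0], NULL, cpu0, NULL);
	pthread_create(&t[1], NULL, cpu1, NULL);
	pthread_create(&t[2], NULL, cpu2, NULL);
	for (int i = 0; i < 3; i++)
		pthread_join(t[i], NULL);	/* never completes: CPU0 -> CPU2 -> CPU1 -> CPU0 */
	return 0;
}

With the patch applied, the hotplug callbacks and the enable/disable paths take
kvm_usage_lock instead of kvm_lock, so the cpu_hotplug_lock -> kvm_lock ->
slots_lock chain on CPU1 can no longer form via those paths (although, as the
quoted note says, other users such as __kvmclock_cpufreq_notifier() may still
take kvm_lock under cpu_hotplug_lock).
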
> +``kvm_usage_count``
> +^^^^^^^^^^^^^^^^^^^

``kvm_usage_lock``, i.e. name the new section after the lock rather than after
the count it protects.

Paolo

> +
> +:Type:		mutex
> +:Arch:		any
> +:Protects:	- kvm_usage_count
> 		- hardware virtualization enable/disable
> :Comment:	KVM also disables CPU hotplug via cpus_read_lock() during
> 		enable/disable.
> @@ -290,11 +296,12 @@ time it will be set using the Dirty tracking mechanism described above.
> wakeup.
>
> ``vendor_module_lock``
> -^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> +^^^^^^^^^^^^^^^^^^^^^^
> :Type:		mutex
> :Arch:		x86
> :Protects:	loading a vendor module (kvm_amd or kvm_intel)
> -:Comment:	Exists because using kvm_lock leads to deadlock. cpu_hotplug_lock is
> -	taken outside of kvm_lock, e.g. in KVM's CPU online/offline callbacks, and
> -	many operations need to take cpu_hotplug_lock when loading a vendor module,
> -	e.g. updating static calls.
> +:Comment:	Exists because using kvm_lock leads to deadlock. kvm_lock is taken
> +	in notifiers, e.g. __kvmclock_cpufreq_notifier(), that may be invoked while
> +	cpu_hotplug_lock is held, e.g. from cpufreq_boost_trigger_state(), and many
> +	operations need to take cpu_hotplug_lock when loading a vendor module, e.g.
> +	updating static calls.
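
As an aside for readers of the quoted :Comment:, the two acquisition orders it
describes can be sketched as follows (hypothetical userspace code, not taken
from the kernel; lock and function names are only illustrative). With a
dedicated vendor_module_lock the two paths never take kvm_lock and
cpu_hotplug_lock in conflicting order; if module loading serialized on
kvm_lock instead, the orders would invert and could deadlock:

#include <pthread.h>

static pthread_mutex_t vendor_module_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t hotplug_lock       = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t kvm_lock           = PTHREAD_MUTEX_INITIALIZER;

static void *module_load(void *arg)
{
	pthread_mutex_lock(&vendor_module_lock);
	pthread_mutex_lock(&hotplug_lock);	/* e.g. updating static calls, per the :Comment: */
	pthread_mutex_unlock(&hotplug_lock);
	pthread_mutex_unlock(&vendor_module_lock);
	return NULL;
}

static void *cpufreq_boost(void *arg)
{
	pthread_mutex_lock(&hotplug_lock);	/* e.g. cpufreq_boost_trigger_state() */
	pthread_mutex_lock(&kvm_lock);		/* e.g. __kvmclock_cpufreq_notifier() */
	pthread_mutex_unlock(&kvm_lock);
	pthread_mutex_unlock(&hotplug_lock);
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	pthread_create(&t[0], NULL, module_load, NULL);
	pthread_create(&t[1], NULL, cpufreq_boost, NULL);
	pthread_join(t[0], NULL);
	pthread_join(t[1], NULL);
	return 0;
}
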
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 4965196cad58..d9b0579d3eea 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -5499,6 +5499,7 @@ __visible bool kvm_rebooting;
> EXPORT_SYMBOL_GPL(kvm_rebooting);
>
> static DEFINE_PER_CPU(bool, hardware_enabled);
> +static DEFINE_MUTEX(kvm_usage_lock);
> static int kvm_usage_count;
>
> static int __hardware_enable_nolock(void)
> @@ -5531,10 +5532,10 @@ static int kvm_online_cpu(unsigned int cpu)
> 	 * be enabled. Otherwise running VMs would encounter unrecoverable
> 	 * errors when scheduled to this CPU.
> 	 */
> -	mutex_lock(&kvm_lock);
> +	mutex_lock(&kvm_usage_lock);
> 	if (kvm_usage_count)
> 		ret = __hardware_enable_nolock();
> -	mutex_unlock(&kvm_lock);
> +	mutex_unlock(&kvm_usage_lock);
> 	return ret;
> }
>
> @@ -5554,10 +5555,10 @@ static void hardware_disable_nolock(void *junk)
>
> static int kvm_offline_cpu(unsigned int cpu)
> {
> -	mutex_lock(&kvm_lock);
> +	mutex_lock(&kvm_usage_lock);
> 	if (kvm_usage_count)
> 		hardware_disable_nolock(NULL);
> -	mutex_unlock(&kvm_lock);
> +	mutex_unlock(&kvm_usage_lock);
> 	return 0;
> }
>
> @@ -5573,9 +5574,9 @@ static void hardware_disable_all_nolock(void)
> static void hardware_disable_all(void)
> {
> 	cpus_read_lock();
> -	mutex_lock(&kvm_lock);
> +	mutex_lock(&kvm_usage_lock);
> 	hardware_disable_all_nolock();
> -	mutex_unlock(&kvm_lock);
> +	mutex_unlock(&kvm_usage_lock);
> 	cpus_read_unlock();
> }
>
> @@ -5606,7 +5607,7 @@ static int hardware_enable_all(void)
> 	 * enable hardware multiple times.
> 	 */
> 	cpus_read_lock();
> -	mutex_lock(&kvm_lock);
> +	mutex_lock(&kvm_usage_lock);
>
> 	r = 0;
>
> @@ -5620,7 +5621,7 @@ static int hardware_enable_all(void)
> 		}
> 	}
>
> -	mutex_unlock(&kvm_lock);
> +	mutex_unlock(&kvm_usage_lock);
> 	cpus_read_unlock();
>
> 	return r;
> @@ -5648,13 +5649,13 @@ static int kvm_suspend(void)
> {
> 	/*
> 	 * Secondary CPUs and CPU hotplug are disabled across the suspend/resume
> -	 * callbacks, i.e. no need to acquire kvm_lock to ensure the usage count
> -	 * is stable. Assert that kvm_lock is not held to ensure the system
> -	 * isn't suspended while KVM is enabling hardware. Hardware enabling
> -	 * can be preempted, but the task cannot be frozen until it has dropped
> -	 * all locks (userspace tasks are frozen via a fake signal).
> +	 * callbacks, i.e. no need to acquire kvm_usage_lock to ensure the usage
> +	 * count is stable. Assert that kvm_usage_lock is not held to ensure
> +	 * the system isn't suspended while KVM is enabling hardware. Hardware
> +	 * enabling can be preempted, but the task cannot be frozen until it has
> +	 * dropped all locks (userspace tasks are frozen via a fake signal).
> 	 */
> -	lockdep_assert_not_held(&kvm_lock);
> +	lockdep_assert_not_held(&kvm_usage_lock);
> 	lockdep_assert_irqs_disabled();
>
> 	if (kvm_usage_count)
> @@ -5664,7 +5665,7 @@ static int kvm_suspend(void)
>
> static void kvm_resume(void)
> {
> -	lockdep_assert_not_held(&kvm_lock);
> +	lockdep_assert_not_held(&kvm_usage_lock);
> 	lockdep_assert_irqs_disabled();
>
> 	if (kvm_usage_count)