Message-ID: <20250430202311.364641-4-mlevitsk@redhat.com>
Date: Wed, 30 Apr 2025 16:23:10 -0400
From: Maxim Levitsky <mlevitsk@...hat.com>
To: kvm@...r.kernel.org
Cc: "H. Peter Anvin" <hpa@...or.com>,
x86@...nel.org,
Maxim Levitsky <mlevitsk@...hat.com>,
Randy Dunlap <rdunlap@...radead.org>,
Paolo Bonzini <pbonzini@...hat.com>,
Will Deacon <will@...nel.org>,
Oliver Upton <oliver.upton@...ux.dev>,
Kunkun Jiang <jiangkunkun@...wei.com>,
Jing Zhang <jingzhangos@...gle.com>,
Albert Ou <aou@...s.berkeley.edu>,
Keisuke Nishimura <keisuke.nishimura@...ia.fr>,
Anup Patel <anup@...infault.org>,
Catalin Marinas <catalin.marinas@....com>,
Atish Patra <atishp@...shpatra.org>,
kvmarm@...ts.linux.dev,
Waiman Long <longman@...hat.com>,
Boqun Feng <boqun.feng@...il.com>,
linux-arm-kernel@...ts.infradead.org,
Peter Zijlstra <peterz@...radead.org>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Paul Walmsley <paul.walmsley@...ive.com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Zenghui Yu <yuzenghui@...wei.com>,
Sebastian Ott <sebott@...hat.com>,
Andre Przywara <andre.przywara@....com>,
Ingo Molnar <mingo@...hat.com>,
Alexandre Ghiti <alex@...ti.fr>,
Bjorn Helgaas <bhelgaas@...gle.com>,
Palmer Dabbelt <palmer@...belt.com>,
Joey Gouly <joey.gouly@....com>,
Borislav Petkov <bp@...en8.de>,
Sean Christopherson <seanjc@...gle.com>,
Marc Zyngier <maz@...nel.org>,
Alexander Potapenko <glider@...gle.com>,
Thomas Gleixner <tglx@...utronix.de>,
linux-kernel@...r.kernel.org,
linux-riscv@...ts.infradead.org,
Shusen Li <lishusen2@...wei.com>,
kvm-riscv@...ts.infradead.org
Subject: [PATCH v3 3/4] locking/mutex: implement mutex_lock_killable_nest_lock

KVM's SEV intra-host migration code needs to lock all vCPUs of both
the source and the target VM before it proceeds with the migration.

The number of vCPUs in each VM is not bounded by anything except KVM's
self-imposed limit of CONFIG_KVM_MAX_NR_VCPUS, which is significantly
larger than the depth of lockdep's held-lock stack.

Luckily, in both VMs the vCPU locks are taken under that VM's own
'kvm->lock', which means we can use the little-known lockdep feature
called a "nest_lock" to support this use case more cleanly than it is
done today.

Implement and expose 'mutex_lock_killable_nest_lock' for this purpose.
Signed-off-by: Maxim Levitsky <mlevitsk@...hat.com>
---
include/linux/mutex.h | 17 +++++++++++++----
kernel/locking/mutex.c | 7 ++++---
2 files changed, 17 insertions(+), 7 deletions(-)
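
As an aside for reviewers, here is a minimal sketch (not part of this
patch) of how a caller such as KVM's SEV intra-host migration could use
the new primitive. The helper name lock_all_vcpus() and its unwind loop
are hypothetical; kvm_for_each_vcpu(), vcpu->mutex and kvm->lock are the
existing KVM constructs the commit message refers to:

#include <linux/kvm_host.h>
#include <linux/mutex.h>

/* Hypothetical helper; the caller must already hold kvm->lock. */
static int lock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i, j;
	int r;

	lockdep_assert_held(&kvm->lock);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/*
		 * Passing kvm->lock as the nest_lock tells lockdep that
		 * all vcpu->mutex acquisitions are serialized by it, so
		 * locking an unbounded number of vCPUs does not overflow
		 * lockdep's held-lock stack.
		 */
		r = mutex_lock_killable_nest_lock(&vcpu->mutex, &kvm->lock);
		if (r)
			goto out_unlock;
	}
	return 0;

out_unlock:
	/* Unlock only the vCPUs that were successfully locked. */
	kvm_for_each_vcpu(j, vcpu, kvm) {
		if (j == i)
			break;
		mutex_unlock(&vcpu->mutex);
	}
	return r;
}

When CONFIG_DEBUG_LOCK_ALLOC is disabled, mutex_lock_killable_nest_lock()
simply falls back to mutex_lock_killable(), as the !CONFIG_DEBUG_LOCK_ALLOC
hunk below shows.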
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index da4518cfd59c..a039fa8c1780 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -156,16 +156,15 @@ static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
-
extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
unsigned int subclass);
-extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
- unsigned int subclass);
+extern int __must_check _mutex_lock_killable(struct mutex *lock,
+ unsigned int subclass, struct lockdep_map *nest_lock);
extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
-#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
+#define mutex_lock_killable(lock) _mutex_lock_killable(lock, 0, NULL)
#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)
#define mutex_lock_nest_lock(lock, nest_lock) \
@@ -174,6 +173,15 @@ do { \
_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
} while (0)
+#define mutex_lock_killable_nest_lock(lock, nest_lock) \
+( \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map), \
+ _mutex_lock_killable(lock, 0, &(nest_lock)->dep_map) \
+)
+
+#define mutex_lock_killable_nested(lock, subclass) \
+ _mutex_lock_killable(lock, subclass, NULL)
+
#else
extern void mutex_lock(struct mutex *lock);
extern int __must_check mutex_lock_interruptible(struct mutex *lock);
@@ -183,6 +191,7 @@ extern void mutex_lock_io(struct mutex *lock);
# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
+# define mutex_lock_killable_nest_lock(lock, nest_lock) mutex_lock_killable(lock)
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
#endif
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index c75a838d3bae..234923121ff0 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -808,11 +808,12 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
int __sched
-mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
+_mutex_lock_killable(struct mutex *lock, unsigned int subclass,
+ struct lockdep_map *nest)
{
- return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
+ return __mutex_lock(lock, TASK_KILLABLE, subclass, nest, _RET_IP_);
}
-EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+EXPORT_SYMBOL_GPL(_mutex_lock_killable);
int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
--
2.46.0