Message-ID: <20250430203013.366479-5-mlevitsk@redhat.com>
Date: Wed, 30 Apr 2025 16:30:12 -0400
From: Maxim Levitsky <mlevitsk@...hat.com>
To: kvm@...r.kernel.org
Cc: linux-riscv@...ts.infradead.org,
Kunkun Jiang <jiangkunkun@...wei.com>,
Waiman Long <longman@...hat.com>,
linux-kernel@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org,
Catalin Marinas <catalin.marinas@....com>,
Bjorn Helgaas <bhelgaas@...gle.com>,
Boqun Feng <boqun.feng@...il.com>,
Borislav Petkov <bp@...en8.de>,
Albert Ou <aou@...s.berkeley.edu>,
Anup Patel <anup@...infault.org>,
Paul Walmsley <paul.walmsley@...ive.com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Palmer Dabbelt <palmer@...belt.com>,
Alexandre Ghiti <alex@...ti.fr>,
Alexander Potapenko <glider@...gle.com>,
Oliver Upton <oliver.upton@...ux.dev>,
Andre Przywara <andre.przywara@....com>,
x86@...nel.org,
Joey Gouly <joey.gouly@....com>,
Thomas Gleixner <tglx@...utronix.de>,
kvm-riscv@...ts.infradead.org,
Atish Patra <atishp@...shpatra.org>,
Ingo Molnar <mingo@...hat.com>,
Jing Zhang <jingzhangos@...gle.com>,
"H. Peter Anvin" <hpa@...or.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
kvmarm@...ts.linux.dev,
Will Deacon <will@...nel.org>,
Keisuke Nishimura <keisuke.nishimura@...ia.fr>,
Sebastian Ott <sebott@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Shusen Li <lishusen2@...wei.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Randy Dunlap <rdunlap@...radead.org>,
Marc Zyngier <maz@...nel.org>,
Maxim Levitsky <mlevitsk@...hat.com>,
Sean Christopherson <seanjc@...gle.com>,
Zenghui Yu <yuzenghui@...wei.com>
Subject: [PATCH v4 4/5] locking/mutex: implement mutex_lock_killable_nest_lock

KVM's SEV intra-host migration code needs to lock all vCPUs of both
the source and the target VM before it proceeds with the migration.

The number of vCPUs that belong to each VM is not bounded by anything
except a self-imposed KVM limit of CONFIG_KVM_MAX_NR_VCPUS, which is
significantly larger than the depth of lockdep's held-lock stack.

Luckily, in both VMs these vCPU locks are taken under that VM's own
'kvm->lock', which means we can use the little-known lockdep feature
called a "nest_lock" to support this use case in a cleaner way than
it is currently done.
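
For background, the existing non-killable annotation is used roughly
like this (hypothetical 'parent'/'child' structures, purely
illustrative and not part of this patch):

	/*
	 * Each child->lock is acquired while parent->lock is held and
	 * is annotated with it as the "nest lock", so lockdep validates
	 * the whole group as a single held-lock entry instead of one
	 * entry per child, and the held-lock stack cannot overflow.
	 */
	mutex_lock(&parent->lock);
	list_for_each_entry(child, &parent->children, list)
		mutex_lock_nest_lock(&child->lock, &parent->lock);
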
Implement and expose 'mutex_lock_killable_nest_lock' for this
purpose.
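
As an illustration of the intended use, here is a rough sketch of the
kind of caller this enables (modelled on the KVM helper added later in
this series; the function name and error handling are illustrative
only):

	/* Take every vCPU mutex of 'kvm', bailing out on a fatal signal. */
	static int lock_all_vcpus_sketch(struct kvm *kvm)
	{
		struct kvm_vcpu *vcpu;
		unsigned long i, j;
		int r;

		lockdep_assert_held(&kvm->lock);

		kvm_for_each_vcpu(i, vcpu, kvm) {
			/* kvm->lock is the nest lock: one lockdep entry total */
			r = mutex_lock_killable_nest_lock(&vcpu->mutex, &kvm->lock);
			if (r)
				goto out_unlock;
		}
		return 0;

	out_unlock:
		kvm_for_each_vcpu(j, vcpu, kvm) {
			if (j == i)
				break;
			mutex_unlock(&vcpu->mutex);
		}
		return r;
	}
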
Signed-off-by: Maxim Levitsky <mlevitsk@...hat.com>
---
 include/linux/mutex.h  | 17 +++++++++++++----
 kernel/locking/mutex.c |  7 ++++---
 2 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index da4518cfd59c..a039fa8c1780 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -156,16 +156,15 @@ static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
 extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
-
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 					unsigned int subclass);
-extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
-					unsigned int subclass);
+extern int __must_check _mutex_lock_killable(struct mutex *lock,
+					unsigned int subclass, struct lockdep_map *nest_lock);
 extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
 
 #define mutex_lock(lock) mutex_lock_nested(lock, 0)
 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
-#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
+#define mutex_lock_killable(lock) _mutex_lock_killable(lock, 0, NULL)
 #define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)
 
 #define mutex_lock_nest_lock(lock, nest_lock)				\
@@ -174,6 +173,15 @@ do {									\
 	_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);		\
 } while (0)
 
+#define mutex_lock_killable_nest_lock(lock, nest_lock)			\
+(									\
+	typecheck(struct lockdep_map *, &(nest_lock)->dep_map),	\
+	_mutex_lock_killable(lock, 0, &(nest_lock)->dep_map)		\
+)
+
+#define mutex_lock_killable_nested(lock, subclass) \
+	_mutex_lock_killable(lock, subclass, NULL)
+
 #else
 extern void mutex_lock(struct mutex *lock);
 extern int __must_check mutex_lock_interruptible(struct mutex *lock);
@@ -183,6 +191,7 @@ extern void mutex_lock_io(struct mutex *lock);
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
 # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
+# define mutex_lock_killable_nest_lock(lock, nest_lock) mutex_lock_killable(lock)
 # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
 # define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
 #endif

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index c75a838d3bae..234923121ff0 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -808,11 +808,12 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
 
 int __sched
-mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
+_mutex_lock_killable(struct mutex *lock, unsigned int subclass,
+		     struct lockdep_map *nest)
 {
-	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
+	return __mutex_lock(lock, TASK_KILLABLE, subclass, nest, _RET_IP_);
 }
-EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+EXPORT_SYMBOL_GPL(_mutex_lock_killable);
 
 int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
--
2.46.0