From 4a99deac7bd2a5e05e08d986e5761e9a15775eda Mon Sep 17 00:00:00 2001
From: Waiman Long
Date: Wed, 20 Dec 2023 11:03:34 -0500
Subject: [PATCH] locking/osq_lock: Minimize spinning on prev->cpu

When CONFIG_PARAVIRT_SPINLOCKS is set, osq_lock() will spin on both
node->locked and node->prev->cpu. That can cause contention with another
CPU modifying node->prev. Reduce that contention by caching prev and
prev->cpu and updating the cached values if node->prev changes.

Signed-off-by: Waiman Long
---
 kernel/locking/osq_lock.c | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index d5610ad52b92..91293401e3e6 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -87,12 +87,27 @@ osq_wait_next(struct optimistic_spin_queue *lock,
 	return next;
 }
 
+#ifndef vcpu_is_preempted
+#define prev_vcpu_is_preempted(n, p, c)	false
+#else
+static inline bool prev_vcpu_is_preempted(struct optimistic_spin_node *node,
+					  struct optimistic_spin_node **pprev,
+					  int *ppvcpu)
+{
+	if (node->prev != *pprev) {
+		*pprev = node->prev;
+		*ppvcpu = node_cpu(*pprev);
+	}
+	return vcpu_is_preempted(*ppvcpu);
+}
+#endif
+
 bool osq_lock(struct optimistic_spin_queue *lock)
 {
 	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
 	struct optimistic_spin_node *prev, *next;
 	int curr = encode_cpu(smp_processor_id());
-	int old;
+	int old, pvcpu;
 
 	node->locked = 0;
 	node->next = NULL;
@@ -110,6 +125,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 
 	prev = decode_cpu(old);
 	node->prev = prev;
+	pvcpu = node_cpu(prev);
 
 	/*
 	 * osq_lock()			unqueue
@@ -141,7 +157,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	 * polling, be careful.
 	 */
 	if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
-				  vcpu_is_preempted(node_cpu(node->prev))))
+				  prev_vcpu_is_preempted(node, &prev, &pvcpu)))
 		return true;
 
 	/* unqueue */
-- 
2.39.3