Message-ID: <CADrL8HVvNbNe1o7Db3du_QDTvkMoSuv5gU09TAHxzY45BqpSjA@mail.gmail.com>
Date: Mon, 27 Jan 2025 13:52:12 -0800
From: James Houghton <jthoughton@...gle.com>
To: Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>
Cc: David Matlack <dmatlack@...gle.com>, David Rientjes <rientjes@...gle.com>,
Marc Zyngier <maz@...nel.org>, Oliver Upton <oliver.upton@...ux.dev>, Wei Xu <weixugc@...gle.com>,
Yu Zhao <yuzhao@...gle.com>, Axel Rasmussen <axelrasmussen@...gle.com>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v8 08/11] KVM: x86/mmu: Add infrastructure to allow
walking rmaps outside of mmu_lock
On Tue, Nov 5, 2024 at 10:43 AM James Houghton <jthoughton@...gle.com> wrote:
>
> From: Sean Christopherson <seanjc@...gle.com>
>
> Steal another bit from rmap entries (which are word aligned pointers, i.e.
> have 2 free bits on 32-bit KVM, and 3 free bits on 64-bit KVM), and use
> the bit to implement a *very* rudimentary per-rmap spinlock. The only
> anticipated usage of the lock outside of mmu_lock is for aging gfns, and
> collisions between aging and other MMU rmap operations are quite rare,
> e.g. unless userspace is being silly and aging a tiny range over and over
> in a tight loop, time between contention when aging an actively running VM
> is O(seconds). In short, a more sophisticated locking scheme shouldn't be
> necessary.
>
> Note, the lock only protects the rmap structure itself; SPTEs that are
> pointed at by a locked rmap can still be modified and zapped by another
> task (KVM drops/zaps SPTEs before deleting the rmap entries).
>
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
> Co-developed-by: James Houghton <jthoughton@...gle.com>
> Signed-off-by: James Houghton <jthoughton@...gle.com>
> ---
>  arch/x86/include/asm/kvm_host.h |   3 +-
>  arch/x86/kvm/mmu/mmu.c          | 129 +++++++++++++++++++++++++++++---
>  2 files changed, 120 insertions(+), 12 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 84ee08078686..378b87ff5b1f 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -26,6 +26,7 @@
> #include <linux/irqbypass.h>
> #include <linux/hyperv.h>
> #include <linux/kfifo.h>
> +#include <linux/atomic.h>
>
> #include <asm/apic.h>
> #include <asm/pvclock-abi.h>
> @@ -402,7 +403,7 @@ union kvm_cpu_role {
> };
>
> struct kvm_rmap_head {
> -	unsigned long val;
> +	atomic_long_t val;
> };
>
> struct kvm_pio_request {
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 145ea180963e..1cdb77df0a4d 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -847,11 +847,117 @@ static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu
> * About rmap_head encoding:
> *
> * If the bit zero of rmap_head->val is clear, then it points to the only spte
> - * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
> + * in this rmap chain. Otherwise, (rmap_head->val & ~3) points to a struct
> * pte_list_desc containing more mappings.
> */
> #define KVM_RMAP_MANY BIT(0)
>
> +/*
> + * rmaps and PTE lists are mostly protected by mmu_lock (the shadow MMU always
> + * operates with mmu_lock held for write), but rmaps can be walked without
> + * holding mmu_lock so long as the caller can tolerate SPTEs in the rmap chain
> + * being zapped/dropped _while the rmap is locked_.
> + *
> + * Other than the KVM_RMAP_LOCKED flag, modifications to rmap entries must be
> + * done while holding mmu_lock for write. This allows a task walking rmaps
> + * without holding mmu_lock to concurrently walk the same entries as a task
> + * that is holding mmu_lock but _not_ the rmap lock. Neither task will modify
> + * the rmaps, thus the walks are stable.
> + *
> + * As alluded to above, SPTEs in rmaps are _not_ protected by KVM_RMAP_LOCKED,
> + * only the rmap chains themselves are protected. E.g. holding an rmap's lock
> + * ensures all "struct pte_list_desc" fields are stable.
> + */
> +#define KVM_RMAP_LOCKED BIT(1)
> +
> +static unsigned long __kvm_rmap_lock(struct kvm_rmap_head *rmap_head)
> +{
> +	unsigned long old_val, new_val;
> +
> +	/*
> +	 * Elide the lock if the rmap is empty, as lockless walkers (read-only
> +	 * mode) don't need to (and can't) walk an empty rmap, nor can they add
> +	 * entries to the rmap.  I.e. the only paths that process empty rmaps
> +	 * do so while holding mmu_lock for write, and are mutually exclusive.
> +	 */
> +	old_val = atomic_long_read(&rmap_head->val);
> +	if (!old_val)
> +		return 0;
> +
> +	do {
> +		/*
> +		 * If the rmap is locked, wait for it to be unlocked before
> +		 * trying to acquire the lock, e.g. to bounce the cache line.
> +		 */
> +		while (old_val & KVM_RMAP_LOCKED) {
> +			old_val = atomic_long_read(&rmap_head->val);
> +			cpu_relax();
> +		}
> +
> +		/*
> +		 * Recheck for an empty rmap, it may have been purged by the
> +		 * task that held the lock.
> +		 */
> +		if (!old_val)
> +			return 0;
> +
> +		new_val = old_val | KVM_RMAP_LOCKED;
> +		/*
> +		 * Use try_cmpxchg_acquire() to prevent reads and writes to the
> +		 * rmap from being reordered outside of the critical section
> +		 * created by __kvm_rmap_lock().
> +		 *
> +		 * Pairs with the atomic_long_set_release() in kvm_rmap_unlock().
> +		 *
> +		 * For the !old_val case, no ordering is needed, as there is no
> +		 * rmap to walk.
> +		 */
> +	} while (!atomic_long_try_cmpxchg_acquire(&rmap_head->val, &old_val, new_val));
> +
> +	/* Return the old value, i.e. _without_ the LOCKED bit set. */
> +	return old_val;
> +}
> +
> +static void kvm_rmap_unlock(struct kvm_rmap_head *rmap_head,
> +			    unsigned long new_val)
> +{
> +	WARN_ON_ONCE(new_val & KVM_RMAP_LOCKED);
> +	/*
> +	 * Ensure that all accesses to the rmap have completed
> +	 * before we actually unlock the rmap.
> +	 *
> +	 * Pairs with the atomic_long_try_cmpxchg_acquire in __kvm_rmap_lock.
> +	 */
> +	atomic_long_set_release(&rmap_head->val, new_val);
> +}
> +
> +static unsigned long kvm_rmap_get(struct kvm_rmap_head *rmap_head)
> +{
> +	return atomic_long_read(&rmap_head->val) & ~KVM_RMAP_LOCKED;
> +}
> +
> +/*
> + * If mmu_lock isn't held, rmaps can only be locked in read-only mode.  The
> + * actual locking is the same, but the caller is disallowed from modifying
> + * the rmap, and so the unlock flow is a nop if the rmap is/was empty.
> + */
> +__maybe_unused
> +static unsigned long kvm_rmap_lock_readonly(struct kvm_rmap_head *rmap_head)
> +{
> +	return __kvm_rmap_lock(rmap_head);
> +}
> +
> +__maybe_unused
> +static void kvm_rmap_unlock_readonly(struct kvm_rmap_head *rmap_head,
> +				     unsigned long old_val)
> +{
> +	if (!old_val)
> +		return;
> +
> +	KVM_MMU_WARN_ON(old_val != kvm_rmap_get(rmap_head));
> +	atomic_long_set(&rmap_head->val, old_val);
Trying not to unnecessarily extend the conversation we already had about
memory ordering here[1]...
I'm pretty sure this should actually be atomic_long_set_release(),
just like kvm_rmap_unlock(), as we cannot permit (at least) the
compiler to reorder rmap reads past this atomic store.
I *think* I mistakenly thought it was okay to leave it as
atomic_long_set() because this routine is only reading, but of course,
those reads must stay within the critical section.
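
To make the hazard concrete, here is a minimal sketch of a lockless
walker (hypothetical, purely for illustration; the decoding follows the
"& ~3" encoding comment in the patch).  With a plain atomic_long_set(),
nothing stops the compiler from sinking the desc read below the
unlocking store, at which point a concurrent writer that sees the rmap
as unlocked could free the pte_list_desc out from under the walker:

static void example_walk_lockless(struct kvm_rmap_head *rmap_head)
{
	unsigned long rmap_val = kvm_rmap_lock_readonly(rmap_head);

	if (rmap_val & KVM_RMAP_MANY) {
		struct pte_list_desc *desc =
			(void *)(rmap_val & ~KVM_RMAP_MANY);

		/* This read must not be reordered past the unlock... */
		(void)READ_ONCE(desc->sptes[0]);
	}

	/* ...so the store that unlocks needs release semantics. */
	kvm_rmap_unlock_readonly(rmap_head, rmap_val);
}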
Anyway, I've refactored it like this:
static void __kvm_rmap_unlock(struct kvm_rmap_head *rmap_head,
			      unsigned long val)
{
	KVM_MMU_WARN_ON(val & KVM_RMAP_LOCKED);
	/*
	 * Ensure that all accesses to the rmap have completed
	 * before we actually unlock the rmap.
	 *
	 * Pairs with the atomic_long_try_cmpxchg_acquire in __kvm_rmap_lock.
	 */
	atomic_long_set_release(&rmap_head->val, val);
}

static void kvm_rmap_unlock(struct kvm *kvm,
			    struct kvm_rmap_head *rmap_head,
			    unsigned long new_val)
{
	lockdep_assert_held_write(&kvm->mmu_lock);
	__kvm_rmap_unlock(rmap_head, new_val);
}

static void kvm_rmap_unlock_readonly(struct kvm_rmap_head *rmap_head,
				     unsigned long old_val)
{
	if (!old_val)
		return;

	KVM_MMU_WARN_ON(old_val != kvm_rmap_get(rmap_head));
	__kvm_rmap_unlock(rmap_head, old_val);
	preempt_enable();
}
It's still true that the !old_val case needs no such ordering, as
!old_val means there is nothing to walk.
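
For completeness: the preempt_enable() above implies that the read-only
lock side disables preemption, so a lockless walker can't be scheduled
out while holding the rmap lock.  A sketch of what that would look like,
under that assumption (not part of the posted patch); note it has to
re-enable preemption itself when eliding an empty rmap, since the unlock
path is a nop in that case:

static unsigned long kvm_rmap_lock_readonly(struct kvm_rmap_head *rmap_head)
{
	unsigned long rmap_val;

	/* Pairs with the preempt_enable() in kvm_rmap_unlock_readonly(). */
	preempt_disable();
	rmap_val = __kvm_rmap_lock(rmap_head);

	/*
	 * kvm_rmap_unlock_readonly() returns early for an empty rmap, so
	 * drop the preempt count here when there is nothing to walk.
	 */
	if (!rmap_val)
		preempt_enable();

	return rmap_val;
}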
[1]: https://lore.kernel.org/all/ZuG4YYzozOddPRCm@google.com/