Message-ID: <CA+EHjTySnJTuLB+XoRya6kS_zw2iMahW9-Ze70oKTf+6k0GrGQ@mail.gmail.com>
Date: Thu, 27 Oct 2022 11:29:14 +0100
From: Fuad Tabba <tabba@...gle.com>
To: Chao Peng <chao.p.peng@...ux.intel.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, linux-fsdevel@...r.kernel.org,
linux-arch@...r.kernel.org, linux-api@...r.kernel.org,
linux-doc@...r.kernel.org, qemu-devel@...gnu.org,
Paolo Bonzini <pbonzini@...hat.com>,
Jonathan Corbet <corbet@....net>,
Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
x86@...nel.org, "H . Peter Anvin" <hpa@...or.com>,
Hugh Dickins <hughd@...gle.com>,
Jeff Layton <jlayton@...nel.org>,
"J . Bruce Fields" <bfields@...ldses.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Shuah Khan <shuah@...nel.org>, Mike Rapoport <rppt@...nel.org>,
Steven Price <steven.price@....com>,
"Maciej S . Szmigiero" <mail@...iej.szmigiero.name>,
Vlastimil Babka <vbabka@...e.cz>,
Vishal Annapurve <vannapurve@...gle.com>,
Yu Zhang <yu.c.zhang@...ux.intel.com>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
luto@...nel.org, jun.nakajima@...el.com, dave.hansen@...el.com,
ak@...ux.intel.com, david@...hat.com, aarcange@...hat.com,
ddutile@...hat.com, dhildenb@...hat.com,
Quentin Perret <qperret@...gle.com>,
Michael Roth <michael.roth@....com>, mhocko@...e.com,
Muchun Song <songmuchun@...edance.com>, wei.w.wang@...el.com
Subject: Re: [PATCH v9 4/8] KVM: Use gfn instead of hva for mmu_notifier_retry
Hi,
On Tue, Oct 25, 2022 at 4:19 PM Chao Peng <chao.p.peng@...ux.intel.com> wrote:
>
> Currently in the mmu_notifier invalidate path, the hva range is
> recorded and then checked against in mmu_invalidate_retry_hva() in the
> page fault path. However, for the soon-to-be-introduced private memory,
> a page fault may not have an hva associated with it, so checking the
> gfn (gpa) makes more sense.
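
Just to spell out the pattern being changed, for anyone following along:
the fault path samples the invalidation sequence count, resolves the pfn
outside mmu_lock, then rechecks under the lock and retries if an
invalidation raced with it. A minimal sketch in the spirit of
direct_page_fault(), with the pfn resolution elided:

	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
	smp_rmb();	/* pairs with the write side in invalidate_end() */

	/* ... resolve fault->pfn outside mmu_lock; this may sleep ... */

	write_lock(&vcpu->kvm->mmu_lock);
	if (mmu_invalidate_retry_gfn(vcpu->kvm, mmu_seq, fault->gfn))
		goto retry;	/* the gfn we faulted on was invalidated */
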
>
> For the existing non-private memory case, gfn is expected to continue
> to work. The only downside is that when multiple gfns alias a single
> hva, the current algorithm of checking multiple ranges could result in
> a much larger range being rejected. Such aliasing should be uncommon,
> so the impact is expected to be small.
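
To make the "larger range" concrete (invented numbers; the min()/max()
merging lives in the elided else branch of the range bookkeeping):

	/*
	 * Two concurrent invalidations record gfn ranges [0x100, 0x101)
	 * and [0x8000, 0x8001); min()/max() merging widens the tracked
	 * window to [0x100, 0x8001), so a fault on any gfn in between
	 * is forced to retry until both invalidations complete.
	 */
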
>
> It also fixes a bug in kvm_zap_gfn_range(), which has already been

nit: Now it's kvm_unmap_gfn_range().

> passing gfns when calling kvm_mmu_invalidate_begin/end(), while those
> functions accept hvas in the current code.
>
> Signed-off-by: Chao Peng <chao.p.peng@...ux.intel.com>
> ---
Based on reading this code and my limited knowledge of the x86 MMU code:
Reviewed-by: Fuad Tabba <tabba@...gle.com>
Cheers,
/fuad
> arch/x86/kvm/mmu/mmu.c | 2 +-
> include/linux/kvm_host.h | 18 +++++++---------
> virt/kvm/kvm_main.c | 45 ++++++++++++++++++++++++++--------------
> 3 files changed, 39 insertions(+), 26 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 6f81539061d6..33b1aec44fb8 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -4217,7 +4217,7 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
> return true;
>
> return fault->slot &&
> - mmu_invalidate_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
> + mmu_invalidate_retry_gfn(vcpu->kvm, mmu_seq, fault->gfn);
> }
>
> static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 739a7562a1f3..79e5cbc35fcf 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -775,8 +775,8 @@ struct kvm {
> struct mmu_notifier mmu_notifier;
> unsigned long mmu_invalidate_seq;
> long mmu_invalidate_in_progress;
> - unsigned long mmu_invalidate_range_start;
> - unsigned long mmu_invalidate_range_end;
> + gfn_t mmu_invalidate_range_start;
> + gfn_t mmu_invalidate_range_end;
> #endif
> struct list_head devices;
> u64 manual_dirty_log_protect;
> @@ -1365,10 +1365,8 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
> void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
> #endif
>
> -void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
> - unsigned long end);
> -void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
> - unsigned long end);
> +void kvm_mmu_invalidate_begin(struct kvm *kvm, gfn_t start, gfn_t end);
> +void kvm_mmu_invalidate_end(struct kvm *kvm, gfn_t start, gfn_t end);
>
> long kvm_arch_dev_ioctl(struct file *filp,
> unsigned int ioctl, unsigned long arg);
> @@ -1937,9 +1935,9 @@ static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
> return 0;
> }
>
> -static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
> +static inline int mmu_invalidate_retry_gfn(struct kvm *kvm,
> unsigned long mmu_seq,
> - unsigned long hva)
> + gfn_t gfn)
> {
> lockdep_assert_held(&kvm->mmu_lock);
> /*
> @@ -1949,8 +1947,8 @@ static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
> * positives, due to shortcuts when handing concurrent invalidations.
> */
> if (unlikely(kvm->mmu_invalidate_in_progress) &&
> - hva >= kvm->mmu_invalidate_range_start &&
> - hva < kvm->mmu_invalidate_range_end)
> + gfn >= kvm->mmu_invalidate_range_start &&
> + gfn < kvm->mmu_invalidate_range_end)
> return 1;
> if (kvm->mmu_invalidate_seq != mmu_seq)
> return 1;
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 8dace78a0278..09c9cdeb773c 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -540,8 +540,7 @@ static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
>
> typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
>
> -typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
> - unsigned long end);
> +typedef void (*on_lock_fn_t)(struct kvm *kvm, gfn_t start, gfn_t end);
>
> typedef void (*on_unlock_fn_t)(struct kvm *kvm);
>
> @@ -628,7 +627,8 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
> locked = true;
> KVM_MMU_LOCK(kvm);
> if (!IS_KVM_NULL_FN(range->on_lock))
> - range->on_lock(kvm, range->start, range->end);
> + range->on_lock(kvm, gfn_range.start,
> + gfn_range.end);
> if (IS_KVM_NULL_FN(range->handler))
> break;
> }
> @@ -715,15 +715,9 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
> kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
> }
>
> -void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
> - unsigned long end)
> +static inline void update_invalidate_range(struct kvm *kvm, gfn_t start,
> + gfn_t end)
> {
> - /*
> - * The count increase must become visible at unlock time as no
> - * spte can be established without taking the mmu_lock and
> - * count is also read inside the mmu_lock critical section.
> - */
> - kvm->mmu_invalidate_in_progress++;
> if (likely(kvm->mmu_invalidate_in_progress == 1)) {
> kvm->mmu_invalidate_range_start = start;
> kvm->mmu_invalidate_range_end = end;
> @@ -744,6 +738,28 @@ void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
> }
> }
>
> +static void mark_invalidate_in_progress(struct kvm *kvm, gfn_t start, gfn_t end)
> +{
> + /*
> + * The count increase must become visible at unlock time as no
> + * spte can be established without taking the mmu_lock and
> + * count is also read inside the mmu_lock critical section.
> + */
> + kvm->mmu_invalidate_in_progress++;
> +}
> +
> +static bool kvm_mmu_handle_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
> +{
> + update_invalidate_range(kvm, range->start, range->end);
> + return kvm_unmap_gfn_range(kvm, range);
> +}
> +
> +void kvm_mmu_invalidate_begin(struct kvm *kvm, gfn_t start, gfn_t end)
> +{
> + mark_invalidate_in_progress(kvm, start, end);
> + update_invalidate_range(kvm, start, end);
> +}
> +
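
For my own understanding, the resulting ordering in the notifier path:
mark_invalidate_in_progress() runs once as ->on_lock, right after
mmu_lock is taken, and update_invalidate_range() then runs through the
->handler for each overlapping memslot range. Roughly (a sketch of what
__kvm_handle_hva_range() ends up doing, not literal code; the loop
helper name is made up):

	KVM_MMU_LOCK(kvm);
	mark_invalidate_in_progress(kvm, start, end);	/* ->on_lock */
	for_each_overlapping_memslot_range(kvm, &gfn_range) {
		/* ->handler: record the gfn range, then zap it */
		kvm_mmu_handle_gfn_range(kvm, &gfn_range);
	}
	KVM_MMU_UNLOCK(kvm);

Callers that invoke kvm_mmu_invalidate_begin() directly still get both
steps in one call, which keeps them equivalent to the old behavior.
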
> static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
> const struct mmu_notifier_range *range)
> {
> @@ -752,8 +768,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
> .start = range->start,
> .end = range->end,
> .pte = __pte(0),
> - .handler = kvm_unmap_gfn_range,
> - .on_lock = kvm_mmu_invalidate_begin,
> + .handler = kvm_mmu_handle_gfn_range,
> + .on_lock = mark_invalidate_in_progress,
> .on_unlock = kvm_arch_guest_memory_reclaimed,
> .flush_on_ret = true,
> .may_block = mmu_notifier_range_blockable(range),
> @@ -791,8 +807,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
> return 0;
> }
>
> -void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
> - unsigned long end)
> +void kvm_mmu_invalidate_end(struct kvm *kvm, gfn_t start, gfn_t end)
> {
> /*
> * This sequence increase will notify the kvm page fault that
> --
> 2.25.1
>