>From c8b0d983791ef783165bbf2230ebc41145bf052e Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Thu, 21 Jul 2022 08:49:37 -0700
Subject: [PATCH 2/2] KVM: x86/mmu: Check for full page vector _before_ adding a new page

Check for a full page vector _before_ adding a new page to the vector
array instead of after, i.e. bail if and only if the vector is full
_and_ a new page needs to be added.  Previously, KVM would bail when
the vector became full even if there were no more unsync pages to
process.

Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/mmu/mmu.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index ac60a52044ef..aca9a8e6c626 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1785,13 +1785,17 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
 		}
 
 		child = to_shadow_page(ent & SPTE_BASE_ADDR_MASK);
+		if (!child->unsync && !child->unsync_children) {
+			clear_unsync_child_bit(sp, i);
+			continue;
+		}
+
+		if (mmu_is_page_vec_full(pvec))
+			return -ENOSPC;
+
+		mmu_pages_add(pvec, child, i);
 
 		if (child->unsync_children) {
-			mmu_pages_add(pvec, child, i);
-
-			if (mmu_is_page_vec_full(pvec))
-				return -ENOSPC;
-
 			ret = __mmu_unsync_walk(child, pvec);
 			if (!ret) {
 				clear_unsync_child_bit(sp, i);
@@ -1800,14 +1804,9 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
 				nr_unsync_leaf += ret;
 			} else
 				return ret;
-		} else if (child->unsync) {
+		} else {
 			nr_unsync_leaf++;
-			mmu_pages_add(pvec, child, i);
-
-			if (mmu_is_page_vec_full(pvec))
-				return -ENOSPC;
-		} else
-			clear_unsync_child_bit(sp, i);
+		}
 	}
 
 	return nr_unsync_leaf;
-- 
2.37.1.359.gd136c6c3e2-goog
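
Not part of the patch: a minimal standalone sketch of the reordered check, using toy names (toy_vec, vec_full, walk) rather than the kernel's kvm_mmu_pages helpers. It models why the ordering matters: entries that need no work are skipped before the capacity check, so the walk bails only when the vector is full _and_ a page actually has to be added.

/*
 * Toy model only -- NOT the kernel code.  Illustrates the ordering in
 * the patch above: skip entries that need no work, then check for a
 * full vector, and only then add.  The old order (add, then check)
 * would bail as soon as the vector filled up, even if every remaining
 * entry needed nothing.
 */
#include <stdbool.h>
#include <stdio.h>

#define VEC_CAP 4	/* small stand-in for the fixed page-vector capacity */

struct toy_vec {
	int nr;
	int pages[VEC_CAP];
};

static bool vec_full(const struct toy_vec *v)
{
	return v->nr == VEC_CAP;
}

/* Returns false (think "-ENOSPC") only if an entry still had to be added. */
static bool walk(struct toy_vec *v, const bool needs_add[], int n)
{
	for (int i = 0; i < n; i++) {
		if (!needs_add[i])
			continue;	/* nothing to add, never bail here */

		if (vec_full(v))
			return false;	/* full _and_ a new entry is needed */

		v->pages[v->nr++] = i;
	}
	return true;
}

int main(void)
{
	struct toy_vec v = { .nr = 0 };
	/* The trailing entries need no work; add-then-check would bail. */
	const bool needs_add[] = { true, true, true, true, false, false };

	printf("walk %s\n", walk(&v, needs_add, 6) ? "completed" : "bailed");
	return 0;
}

With these toy capacities, the old add-then-check order would report a full vector right after the fourth add even though the remaining entries need nothing; the check-then-add order completes the walk.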