Message-ID: <20260106102222.25160-1-yan.y.zhao@intel.com>
Date: Tue, 6 Jan 2026 18:22:22 +0800
From: Yan Zhao <yan.y.zhao@...el.com>
To: pbonzini@...hat.com,
seanjc@...gle.com
Cc: linux-kernel@...r.kernel.org,
kvm@...r.kernel.org,
x86@...nel.org,
rick.p.edgecombe@...el.com,
dave.hansen@...el.com,
kas@...nel.org,
tabba@...gle.com,
ackerleytng@...gle.com,
michael.roth@....com,
david@...nel.org,
vannapurve@...gle.com,
sagis@...gle.com,
vbabka@...e.cz,
thomas.lendacky@....com,
nik.borisov@...e.com,
pgonda@...gle.com,
fan.du@...el.com,
jun.miao@...el.com,
francescolavra.fl@...il.com,
jgross@...e.com,
ira.weiny@...el.com,
isaku.yamahata@...el.com,
xiaoyao.li@...el.com,
kai.huang@...el.com,
binbin.wu@...ux.intel.com,
chao.p.peng@...el.com,
chao.gao@...el.com,
yan.y.zhao@...el.com
Subject: [PATCH v3 14/24] KVM: Change the return type of gfn_handler_t() from bool to int

Change the return type of gfn_handler_t() from bool to int: a negative
return value indicates failure, 1 indicates success with a remote TLB
flush required, and 0 indicates success with no flush required.

This prepares for a later change that will allow
kvm_pre_set_memory_attributes() to fail.

No functional change intended.

Signed-off-by: Yan Zhao <yan.y.zhao@...el.com>
---
v3:
- Rebased.
RFC v2:
- No change.
RFC v1:
- New patch.
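
For illustration only (not part of this patch), a handler following the
new return convention might look like the minimal sketch below; the
handler name and the example_zap_range() helper are hypothetical:

	/* Hypothetical handler showing the new int return convention. */
	static int kvm_example_gfn_handler(struct kvm *kvm,
					   struct kvm_gfn_range *range)
	{
		bool flush;

		if (WARN_ON_ONCE(!range->slot))
			return -EINVAL;		/* negative: failure */

		/* example_zap_range() is a hypothetical helper that
		 * returns true if any SPTE was zapped.
		 */
		flush = example_zap_range(kvm, range);

		return flush ? 1 : 0;	/* 1: flush required, 0: no flush */
	}

Callers such as kvm_handle_gfn_range() then propagate a negative return
as an error and OR the 0/1 result into their flush decision, matching
the handling added in this patch.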
---
 arch/arm64/kvm/mmu.c             |  8 ++++----
 arch/loongarch/kvm/mmu.c         |  8 ++++----
 arch/mips/kvm/mmu.c              |  6 +++---
 arch/powerpc/kvm/book3s.c        |  4 ++--
 arch/powerpc/kvm/e500_mmu_host.c |  8 ++++----
 arch/riscv/kvm/mmu.c             | 12 ++++++------
 arch/x86/kvm/mmu/mmu.c           | 20 ++++++++++----------
 include/linux/kvm_host.h         | 12 ++++++------
 virt/kvm/kvm_main.c              | 24 ++++++++++++++++--------
 9 files changed, 55 insertions(+), 47 deletions(-)

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 5ab0cfa08343..c39d3ef577f8 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -2221,12 +2221,12 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
return false;
}
-bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+int kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
u64 size = (range->end - range->start) << PAGE_SHIFT;
if (!kvm->arch.mmu.pgt)
- return false;
+ return 0;
return KVM_PGT_FN(kvm_pgtable_stage2_test_clear_young)(kvm->arch.mmu.pgt,
range->start << PAGE_SHIFT,
@@ -2237,12 +2237,12 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
*/
}
-bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+int kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
u64 size = (range->end - range->start) << PAGE_SHIFT;
if (!kvm->arch.mmu.pgt)
- return false;
+ return 0;
return KVM_PGT_FN(kvm_pgtable_stage2_test_clear_young)(kvm->arch.mmu.pgt,
range->start << PAGE_SHIFT,
diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index a7fa458e3360..06fa060878c9 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -511,7 +511,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
range->end << PAGE_SHIFT, &ctx);
}
-bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+int kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
kvm_ptw_ctx ctx;
@@ -523,15 +523,15 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
range->end << PAGE_SHIFT, &ctx);
}
-bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+int kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
gpa_t gpa = range->start << PAGE_SHIFT;
kvm_pte_t *ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
if (ptep && kvm_pte_present(NULL, ptep) && kvm_pte_young(*ptep))
- return true;
+ return 1;
- return false;
+ return 0;
}
/*
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index d2c3b6b41f18..c26cc89c8e98 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -444,18 +444,18 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
return true;
}
-bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+int kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
return kvm_mips_mkold_gpa_pt(kvm, range->start, range->end);
}
-bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+int kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
gpa_t gpa = range->start << PAGE_SHIFT;
pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
if (!gpa_pte)
- return false;
+ return 0;
return pte_young(*gpa_pte);
}
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index d79c5d1098c0..9bf6e1cf64f1 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -886,12 +886,12 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
return kvm->arch.kvm_ops->unmap_gfn_range(kvm, range);
}
-bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+int kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
return kvm->arch.kvm_ops->age_gfn(kvm, range);
}
-bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+int kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
return kvm->arch.kvm_ops->test_age_gfn(kvm, range);
}
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 06caf8bbbe2b..dd5411ee242e 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -697,16 +697,16 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
return kvm_e500_mmu_unmap_gfn(kvm, range);
}
-bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+int kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
/* XXX could be more clever ;) */
- return false;
+ return 0;
}
-bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+int kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
/* XXX could be more clever ;) */
- return false;
+ return 0;
}
/*****************************************/
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 4ab06697bfc0..aa163d2ef7d5 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -259,7 +259,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
return false;
}
-bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+int kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
pte_t *ptep;
u32 ptep_level = 0;
@@ -267,7 +267,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
struct kvm_gstage gstage;
if (!kvm->arch.pgd)
- return false;
+ return 0;
WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
@@ -277,12 +277,12 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
gstage.pgd = kvm->arch.pgd;
if (!kvm_riscv_gstage_get_leaf(&gstage, range->start << PAGE_SHIFT,
&ptep, &ptep_level))
- return false;
+ return 0;
return ptep_test_and_clear_young(NULL, 0, ptep);
}
-bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+int kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
pte_t *ptep;
u32 ptep_level = 0;
@@ -290,7 +290,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
struct kvm_gstage gstage;
if (!kvm->arch.pgd)
- return false;
+ return 0;
WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
@@ -300,7 +300,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
gstage.pgd = kvm->arch.pgd;
if (!kvm_riscv_gstage_get_leaf(&gstage, range->start << PAGE_SHIFT,
&ptep, &ptep_level))
- return false;
+ return 0;
return pte_young(ptep_get(ptep));
}
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 029f2f272ffc..1b180279aacd 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1810,7 +1810,7 @@ static bool kvm_may_have_shadow_mmu_sptes(struct kvm *kvm)
return !tdp_mmu_enabled || READ_ONCE(kvm->arch.indirect_shadow_pages);
}
-bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+int kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
bool young = false;
@@ -1823,7 +1823,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
return young;
}
-bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+int kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
bool young = false;
@@ -7962,8 +7962,8 @@ static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
}
-bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
- struct kvm_gfn_range *range)
+int kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
+ struct kvm_gfn_range *range)
{
struct kvm_memory_slot *slot = range->slot;
int level;
@@ -7980,10 +7980,10 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
* a hugepage can be used for affected ranges.
*/
if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
- return false;
+ return 0;
if (WARN_ON_ONCE(range->end <= range->start))
- return false;
+ return 0;
/*
* If the head and tail pages of the range currently allow a hugepage,
@@ -8042,8 +8042,8 @@ static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
return true;
}
-bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
- struct kvm_gfn_range *range)
+int kvm_arch_post_set_memory_attributes(struct kvm *kvm,
+ struct kvm_gfn_range *range)
{
unsigned long attrs = range->arg.attributes;
struct kvm_memory_slot *slot = range->slot;
@@ -8059,7 +8059,7 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
* SHARED may now allow hugepages.
*/
if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
- return false;
+ return 0;
/*
* The sequence matters here: upper levels consume the result of lower
@@ -8106,7 +8106,7 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
hugepage_set_mixed(slot, gfn, level);
}
}
- return false;
+ return 0;
}
void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e563bb22c481..6f3d29db0505 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -273,8 +273,8 @@ struct kvm_gfn_range {
bool lockless;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
-bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
-bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
+int kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
+int kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
int kvm_split_cross_boundary_leafs(struct kvm *kvm, struct kvm_gfn_range *range,
bool shared);
#endif
@@ -734,10 +734,10 @@ static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
extern bool vm_memory_attributes;
bool kvm_range_has_vm_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
unsigned long mask, unsigned long attrs);
-bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
+int kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
+ struct kvm_gfn_range *range);
+int kvm_arch_post_set_memory_attributes(struct kvm *kvm,
struct kvm_gfn_range *range);
-bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
- struct kvm_gfn_range *range);
#else
#define vm_memory_attributes false
#endif /* CONFIG_KVM_VM_MEMORY_ATTRIBUTES */
@@ -1568,7 +1568,7 @@ void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
void kvm_mmu_invalidate_begin(struct kvm *kvm);
void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end);
void kvm_mmu_invalidate_end(struct kvm *kvm);
-bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
+int kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index feeef7747099..471f798dba2d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -517,7 +517,7 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
return container_of(mn, struct kvm, mmu_notifier);
}
-typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
+typedef int (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
typedef void (*on_lock_fn_t)(struct kvm *kvm);
@@ -601,6 +601,7 @@ static __always_inline kvm_mn_ret_t kvm_handle_hva_range(struct kvm *kvm,
kvm_for_each_memslot_in_hva_range(node, slots,
range->start, range->end - 1) {
unsigned long hva_start, hva_end;
+ int ret;
slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
hva_start = max_t(unsigned long, range->start, slot->userspace_addr);
@@ -641,7 +642,9 @@ static __always_inline kvm_mn_ret_t kvm_handle_hva_range(struct kvm *kvm,
goto mmu_unlock;
}
}
- r.ret |= range->handler(kvm, &gfn_range);
+ ret = range->handler(kvm, &gfn_range);
+ WARN_ON_ONCE(ret < 0);
+ r.ret |= ret;
}
}
@@ -727,7 +730,7 @@ void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
}
}
-bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
+int kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
kvm_mmu_invalidate_range_add(kvm, range->start, range->end);
return kvm_unmap_gfn_range(kvm, range);
@@ -2507,7 +2510,8 @@ static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
struct kvm_memslots *slots;
struct kvm_memslot_iter iter;
bool found_memslot = false;
- bool ret = false;
+ bool flush = false;
+ int ret = 0;
int i;
gfn_range.arg = range->arg;
@@ -2540,19 +2544,23 @@ static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
range->on_lock(kvm);
}
- ret |= range->handler(kvm, &gfn_range);
+ ret = range->handler(kvm, &gfn_range);
+ if (ret < 0)
+ goto err;
+ flush |= ret;
}
}
- if (range->flush_on_ret && ret)
+err:
+ if (range->flush_on_ret && flush)
kvm_flush_remote_tlbs(kvm);
if (found_memslot)
KVM_MMU_UNLOCK(kvm);
}
-static bool kvm_pre_set_memory_attributes(struct kvm *kvm,
- struct kvm_gfn_range *range)
+static int kvm_pre_set_memory_attributes(struct kvm *kvm,
+ struct kvm_gfn_range *range)
{
/*
* Unconditionally add the range to the invalidation set, regardless of
--
2.43.2