Message-Id: <3e5e7c40608e9fc1b3aff53fca0ba9b3a37744b3.1673539699.git.isaku.yamahata@intel.com>
Date: Thu, 12 Jan 2023 08:31:50 -0800
From: isaku.yamahata@...el.com
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc: isaku.yamahata@...el.com, isaku.yamahata@...il.com,
Paolo Bonzini <pbonzini@...hat.com>, erdemaktas@...gle.com,
Sean Christopherson <seanjc@...gle.com>,
Sagi Shahar <sagis@...gle.com>,
David Matlack <dmatlack@...gle.com>
Subject: [PATCH v11 042/113] KVM: Add flags to struct kvm_gfn_range
From: Isaku Yamahata <isaku.yamahata@...el.com>
For TDX, kvm_unmap_gfn_range() needs to know why it was called: an mmu
notifier, the set memory attributes ioctl, or a restrictedmem notifier.
TDX changes its behavior based on the reason. For an mmu notifier, the
operation targets a shared memory slot, so zap the shared PTEs. For set
memory attributes, the operation is a private<->shared conversion, so zap
the PTEs of the original mapping. For restrictedmem, a hole is being
punched in the range, so zap the corresponding PTEs.
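For illustration only (not part of this patch), a minimal sketch of how a
TDX-aware kvm_unmap_gfn_range() implementation might branch on the new
flags; the zap_*() helpers are placeholders, not existing functions:

bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	if (range->flags & KVM_GFN_RANGE_FLAGS_RESTRICTED_MEM) {
		/* Hole punched in restrictedmem: zap the private PTEs. */
		return zap_private_range(kvm, range->start, range->end);
	}

	if (range->flags & KVM_GFN_RANGE_FLAGS_SET_MEM_ATTR) {
		/*
		 * Private<->shared conversion: range->attrs carries the new
		 * attributes; zap the PTEs of the original, pre-conversion
		 * mapping.
		 */
		return zap_for_conversion(kvm, range->start, range->end,
					  range->attrs);
	}

	/* Plain mmu notifier invalidation: zap the shared PTEs. */
	return zap_shared_range(kvm, range->start, range->end);
}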
Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
---
include/linux/kvm_host.h | 9 ++++++++-
virt/kvm/kvm_main.c | 5 ++++-
2 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index cd1f3634dd6a..0c3b9cf0a731 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -256,12 +256,19 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
+#define KVM_GFN_RANGE_FLAGS_RESTRICTED_MEM BIT(0)
+#define KVM_GFN_RANGE_FLAGS_SET_MEM_ATTR BIT(1)
+
struct kvm_gfn_range {
struct kvm_memory_slot *slot;
gfn_t start;
gfn_t end;
- pte_t pte;
+ union {
+ pte_t pte;
+ u64 attrs;
+ };
bool may_block;
+ unsigned int flags;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6c61b71b56d2..aef8802b188e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -669,6 +669,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
gfn_range.slot = slot;
+ gfn_range.flags = 0;
if (!locked) {
locked = true;
@@ -971,6 +972,7 @@ static void kvm_restrictedmem_invalidate_begin(struct restrictedmem_notifier *no
gfn_range.slot = slot;
gfn_range.pte = __pte(0);
gfn_range.may_block = true;
+ gfn_range.flags = KVM_GFN_RANGE_FLAGS_RESTRICTED_MEM;
idx = srcu_read_lock(&kvm->srcu);
KVM_MMU_LOCK(kvm);
@@ -2511,8 +2513,9 @@ static void kvm_unmap_mem_range(struct kvm *kvm, gfn_t start, gfn_t end,
int i;
int r = 0;
- gfn_range.pte = __pte(0);
+ gfn_range.attrs = attrs;
gfn_range.may_block = true;
+ gfn_range.flags = KVM_GFN_RANGE_FLAGS_SET_MEM_ATTR;
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
slots = __kvm_memslots(kvm, i);
--
2.25.1