Message-Id: <e8d3ab4a56d69a09ba74ff1c439f904075d38c16.1686858861.git.isaku.yamahata@intel.com>
Date: Thu, 15 Jun 2023 13:12:18 -0700
From: isaku.yamahata@...el.com
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc: isaku.yamahata@...el.com, isaku.yamahata@...il.com,
Paolo Bonzini <pbonzini@...hat.com>, erdemaktas@...gle.com,
Sean Christopherson <seanjc@...gle.com>,
Sagi Shahar <sagis@...gle.com>,
David Matlack <dmatlack@...gle.com>,
Kai Huang <kai.huang@...el.com>,
Zhi Wang <zhi.wang.linux@...il.com>, chen.bo@...el.com,
linux-coco@...ts.linux.dev,
Chao Peng <chao.p.peng@...ux.intel.com>,
Ackerley Tng <ackerleytng@...gle.com>,
Vishal Annapurve <vannapurve@...gle.com>,
Michael Roth <michael.roth@....com>
Subject: [RFC PATCH 5/6] KVM: Add flags to struct kvm_gfn_range
From: Isaku Yamahata <isaku.yamahata@...el.com>
TDX and SEV-SNP need to know why kvm_unmap_gfn_range() is being invoked:
from the mmu notifier, from the set memory attributes ioctl, or from a KVM
gmem callback.  The callback handler changes its behavior, or performs
additional housekeeping, depending on the caller.  For the mmu notifier, it
zaps shared PTEs.  For set memory attributes, it converts the memory
attributes (private <=> shared).  For KVM gmem, it handles punching a hole
in the range or releasing the file.

Add a flags member to struct kvm_gfn_range so that the caller can pass this
context down to the handler.
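For illustration only (not part of this patch), an arch hook for a TDX-like
backend could dispatch on the new flags roughly as follows; the tdx_*()
helpers are hypothetical placeholders:

/* Illustrative sketch only: the tdx_*() helpers are hypothetical. */
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	/* Private <=> shared conversion requested via the ioctl. */
	if (range->flags & KVM_GFN_RANGE_FLAGS_SET_MEM_ATTR)
		return tdx_convert_range(kvm, range->slot, range->start,
					 range->end, range->attrs);

	/* gmem is freeing backing pages: drop the private mappings. */
	if (range->flags & (KVM_GFN_RANGE_FLAGS_GMEM_PUNCH_HOLE |
			    KVM_GFN_RANGE_FLAGS_GMEM_RELEASE))
		return tdx_zap_private_range(kvm, range->slot,
					     range->start, range->end);

	/* Plain mmu notifier invalidation: only shared PTEs are zapped. */
	return tdx_zap_shared_range(kvm, range->slot, range->start,
				    range->end);
}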
Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
---
 include/linux/kvm_host.h | 11 ++++++++++-
 virt/kvm/guest_mem.c     | 10 +++++++---
 virt/kvm/kvm_main.c      |  4 +++-
 3 files changed, 20 insertions(+), 5 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1a47cedae8a1..c049c0aa44d6 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -256,12 +256,21 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 #endif
 
 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
+
+#define KVM_GFN_RANGE_FLAGS_SET_MEM_ATTR BIT(0)
+#define KVM_GFN_RANGE_FLAGS_GMEM_PUNCH_HOLE BIT(1)
+#define KVM_GFN_RANGE_FLAGS_GMEM_RELEASE BIT(2)
+
 struct kvm_gfn_range {
 	struct kvm_memory_slot *slot;
 	gfn_t start;
 	gfn_t end;
-	pte_t pte;
+	union {
+		pte_t pte;
+		u64 attrs;
+	};
 	bool may_block;
+	unsigned int flags;
 };
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
diff --git a/virt/kvm/guest_mem.c b/virt/kvm/guest_mem.c
index cdf2d84683c8..30b8f66784d4 100644
--- a/virt/kvm/guest_mem.c
+++ b/virt/kvm/guest_mem.c
@@ -99,7 +99,8 @@ static struct folio *kvm_gmem_get_folio(struct file *file, pgoff_t index)
 }
 
 static void kvm_gmem_invalidate_begin(struct kvm *kvm, struct kvm_gmem *gmem,
-				       pgoff_t start, pgoff_t end)
+				       pgoff_t start, pgoff_t end,
+				       unsigned int flags)
 {
 	struct kvm_memory_slot *slot;
 	unsigned long index;
@@ -118,6 +119,7 @@ static void kvm_gmem_invalidate_begin(struct kvm *kvm, struct kvm_gmem *gmem,
 			.slot = slot,
 			.pte = __pte(0),
 			.may_block = true,
+			.flags = flags,
 		};
 
 		kvm_mmu_invalidate_range_add(kvm, gfn_range.start, gfn_range.end);
@@ -156,7 +158,8 @@ static long kvm_gmem_punch_hole(struct file *file, loff_t offset, loff_t len)
 	 */
 	filemap_invalidate_lock(file->f_mapping);
 
-	kvm_gmem_invalidate_begin(kvm, gmem, start, end);
+	kvm_gmem_invalidate_begin(kvm, gmem, start, end,
+				  KVM_GFN_RANGE_FLAGS_GMEM_PUNCH_HOLE);
 
 	truncate_inode_pages_range(file->f_mapping, offset, offset + len - 1);
@@ -263,7 +266,8 @@ static int kvm_gmem_release(struct inode *inode, struct file *file)
 	 * Free the backing memory, and more importantly, zap all SPTEs that
 	 * pointed at this file.
 	 */
-	kvm_gmem_invalidate_begin(kvm, gmem, 0, -1ul);
+	kvm_gmem_invalidate_begin(kvm, gmem, 0, -1ul,
+				  KVM_GFN_RANGE_FLAGS_GMEM_RELEASE);
 	truncate_inode_pages_final(file->f_mapping);
 	kvm_gmem_invalidate_end(kvm, gmem, 0, -1ul);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 422d49634c56..9cdfa2fb675f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -613,6 +613,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
 			gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
 			gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
 			gfn_range.slot = slot;
+			gfn_range.flags = 0;
 
 			if (!locked) {
 				locked = true;
@@ -2391,8 +2392,9 @@ static void kvm_mem_attrs_changed(struct kvm *kvm, unsigned long attrs,
 	bool flush = false;
 	int i;
 
-	gfn_range.pte = __pte(0);
+	gfn_range.attrs = attrs;
 	gfn_range.may_block = true;
+	gfn_range.flags = KVM_GFN_RANGE_FLAGS_SET_MEM_ATTR;
 
 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
 		slots = __kvm_memslots(kvm, i);
--
2.25.1