Message-Id: <20231122121822.1042-9-paul@xen.org>
Date: Wed, 22 Nov 2023 12:18:15 +0000
From: Paul Durrant <paul@....org>
To: David Woodhouse <dwmw2@...radead.org>, Paul Durrant <paul@....org>,
Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v9 08/15] KVM: pfncache: allow a cache to be activated with a fixed (userspace) HVA
From: Paul Durrant <pdurrant@...zon.com>
Some pfncache pages may actually be overlays on guest memory that have a
fixed HVA within the VMM. It's pointless to invalidate such cached
mappings if the overlay is moved, so allow a cache to be activated directly
with the HVA to cater for such cases. A subsequent patch will make use
of this facility.
Signed-off-by: Paul Durrant <pdurrant@...zon.com>
---
Cc: Sean Christopherson <seanjc@...gle.com>
Cc: Paolo Bonzini <pbonzini@...hat.com>
Cc: David Woodhouse <dwmw2@...radead.org>
v9:
- Pass both GPA and HVA into __kvm_gpc_refresh() rather than overloading
the address parameter and using a bool flag to indicate what it is.
v8:
- Re-worked to avoid messing with struct gfn_to_pfn_cache.
---
include/linux/kvm_host.h | 19 ++++++++++++++++++-
virt/kvm/pfncache.c | 35 +++++++++++++++++++++++++++--------
2 files changed, 45 insertions(+), 9 deletions(-)
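
For illustration, a minimal usage sketch (not part of the patch; "struct
overlay_info" and the function names below are hypothetical, and error
handling is elided): the cache is activated on a fixed userspace HVA and
then read under gpc->lock using the usual check/refresh loop.

	struct overlay_info {
		u64 field;
	};

	static int cache_overlay(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
				 unsigned long hva)
	{
		kvm_gpc_init(gpc, kvm);

		/* Fails with -EINVAL if the range would cross a page boundary. */
		return kvm_gpc_activate_hva(gpc, hva, sizeof(struct overlay_info));
	}

	static u64 read_overlay_field(struct gfn_to_pfn_cache *gpc)
	{
		struct overlay_info *info;
		unsigned long flags;
		u64 val;

		read_lock_irqsave(&gpc->lock, flags);
		while (!kvm_gpc_check(gpc, sizeof(*info))) {
			read_unlock_irqrestore(&gpc->lock, flags);

			/* Re-map (or fail) if the mapping was invalidated. */
			if (kvm_gpc_refresh(gpc, sizeof(*info)))
				return 0;

			read_lock_irqsave(&gpc->lock, flags);
		}

		info = gpc->khva;
		val = info->field;
		read_unlock_irqrestore(&gpc->lock, flags);

		return val;
	}

This mirrors the existing GPA-based flow; only the activation call differs.
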
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b1dc2e5a64f3..484c587e8290 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1312,6 +1312,22 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
*/
int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
+/**
+ * kvm_gpc_activate_hva - prepare a cached kernel mapping and HPA for a given HVA.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @hva: userspace virtual address to map.
+ * @len: sanity check; the range being accessed must fit within a single page.
+ *
+ * @return: 0 for success.
+ * -EINVAL for a mapping which would cross a page boundary.
+ * -EFAULT for an untranslatable host virtual address.
+ *
+ * The semantics of this function are the same as those of kvm_gpc_activate(). It
+ * merely bypasses a layer of address translation.
+ */
+int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long hva, unsigned long len);
+
/**
* kvm_gpc_check - check validity of a gfn_to_pfn_cache.
*
@@ -1365,7 +1381,8 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
*/
static inline void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc)
{
- mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
+ if (gpc->gpa != KVM_XEN_INVALID_GPA)
+ mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
}
void kvm_sigset_activate(struct kvm_vcpu *vcpu);
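
Continuing the hypothetical sketch above, the effect of the
kvm_gpc_mark_dirty() change is that dirty tracking quietly becomes a no-op
for HVA-activated caches, which have no memslot to dirty:

	/*
	 * Caller must hold gpc->lock with a successful kvm_gpc_check().
	 * For a GPA-activated cache this dirties the backing gfn; for an
	 * HVA-activated cache gpc->gpa is KVM_XEN_INVALID_GPA, so the
	 * mark-dirty call does nothing.
	 */
	static void write_overlay_field(struct gfn_to_pfn_cache *gpc, u64 val)
	{
		struct overlay_info *info = gpc->khva;

		info->field = val;
		kvm_gpc_mark_dirty(gpc);
	}
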
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 97eec8ee3449..c2a2d1e145b6 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -209,11 +209,13 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
return -EFAULT;
}
-static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
- unsigned long page_offset = offset_in_page(gpa);
+ unsigned long page_offset = (gpa != KVM_XEN_INVALID_GPA) ?
+ offset_in_page(gpa) :
+ offset_in_page(uhva);
bool unmap_old = false;
unsigned long old_uhva;
kvm_pfn_t old_pfn;
@@ -246,9 +248,15 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);
- /* Refresh the userspace HVA if necessary */
- if (gpc->gpa != gpa || gpc->generation != slots->generation ||
- kvm_is_error_hva(gpc->uhva)) {
+ if (gpa == KVM_XEN_INVALID_GPA) {
+ gpc->gpa = KVM_XEN_INVALID_GPA;
+ gpc->uhva = PAGE_ALIGN_DOWN(uhva);
+
+ if (gpc->uhva != old_uhva)
+ hva_change = true;
+ } else if (gpc->gpa != gpa ||
+ gpc->generation != slots->generation ||
+ kvm_is_error_hva(gpc->uhva)) {
gfn_t gfn = gpa_to_gfn(gpa);
gpc->gpa = gpa;
@@ -319,7 +327,7 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
- return __kvm_gpc_refresh(gpc, gpc->gpa, len);
+ return __kvm_gpc_refresh(gpc, gpc->gpa, gpc->uhva, len);
}
void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
@@ -332,7 +340,8 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
gpc->uhva = KVM_HVA_ERR_BAD;
}
-int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
+ unsigned long len)
{
struct kvm *kvm = gpc->kvm;
@@ -353,7 +362,17 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
gpc->active = true;
write_unlock_irq(&gpc->lock);
}
- return __kvm_gpc_refresh(gpc, gpa, len);
+ return __kvm_gpc_refresh(gpc, gpa, uhva, len);
+}
+
+int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+{
+ return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len);
+}
+
+int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long uhva, unsigned long len)
+{
+ return __kvm_gpc_activate(gpc, KVM_XEN_INVALID_GPA, uhva, len);
}
void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
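
One more hypothetical illustration of the shared -EINVAL path: page_offset
is now derived from the HVA when no GPA is supplied, so a range straddling
a page boundary is rejected identically in either mode (4KiB pages and the
names below are assumed):

	/*
	 * An 8-byte object starting 4 bytes before the end of a page
	 * spans two pages, so activation fails in both flavours.
	 */
	static int demo_page_cross(struct gfn_to_pfn_cache *gpc,
				   unsigned long page_base)
	{
		unsigned long hva = page_base + PAGE_SIZE - 4;

		return kvm_gpc_activate_hva(gpc, hva, 8); /* -EINVAL */
	}
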
--
2.39.2