[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20190220201609.28290-24-joao.m.martins@oracle.com>
Date: Wed, 20 Feb 2019 20:15:53 +0000
From: Joao Martins <joao.m.martins@...cle.com>
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org
Cc: Ankur Arora <ankur.a.arora@...cle.com>,
Boris Ostrovsky <boris.ostrovsky@...cle.com>,
Joao Martins <joao.m.martins@...cle.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Radim Krčmář <rkrcmar@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
"H. Peter Anvin" <hpa@...or.com>, x86@...nel.org
Subject: [PATCH RFC 23/39] KVM: x86/xen: grant table grow support
Guest grant tables used by the core Xen PV devices (xenbus, console) need to
be seeded with a bunch of reserved entries at boot. However, at init,
the grant table is, from a guest perspective, empty and has no frames
backing it. That only happens once the guest does:
XENMEM_add_to_physmap(idx=N,gfn=M,space=XENMAPSPACE_grant_table)
Which will share the added page with the hypervisor.
The way we handle this then is to seed (from userspace) the initial
frame where we store special entries which reference guest PV ring
pages. These pages are in-turn mapped/unmapped in backend domains
hosting xenstored and xenconsoled.
When the guest initializes its grant tables (with the hypercall listed
above) we copy the entries from the private frame into a "mapped" gfn.
To do this, the userspace VMM handles the XENMEM_add_to_physmap hypercall and
the hypervisor grows its grant table. Note that a grant table can only
grow - no shrinking is possible.
Signed-off-by: Joao Martins <joao.m.martins@...cle.com>
---
arch/x86/include/asm/kvm_host.h | 16 ++++++++
arch/x86/kvm/xen.c | 90 +++++++++++++++++++++++++++++++++++++++++
include/uapi/linux/kvm.h | 5 +++
3 files changed, 111 insertions(+)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e0cbc0899580..70bb7339ddd4 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -860,6 +860,21 @@ struct kvm_hv {
atomic_t num_mismatched_vp_indexes;
};
+/*
+ * One maptrack entry: records a grant mapping made by a backend domain so
+ * it can later be unmapped. The union lets the whole record be read/written
+ * as a single u64 ("fields") or by individual member.
+ */
+struct kvm_grant_map {
+ u64 gpa;
+ union {
+ struct {
+
+/* NOTE(review): identifiers starting with '_' + uppercase are reserved for
+ * the implementation; consider KVM_GNTMAP_ACTIVE_BIT instead. */
+#define _KVM_GNTMAP_ACTIVE (15)
+#define KVM_GNTMAP_ACTIVE (1 << _KVM_GNTMAP_ACTIVE)
+ u16 flags;
+ u16 ref;
+ u32 domid;
+ };
+ u64 fields;
+ };
+};
+
/* Xen grant table */
struct kvm_grant_table {
u32 nr_frames;
@@ -871,6 +886,7 @@ struct kvm_grant_table {
gfn_t *frames_addr;
gpa_t initial_addr;
struct grant_entry_v1 *initial;
+ struct kvm_grant_map **handle;
/* maptrack limits */
u32 max_mt_frames;
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index b9e6e8f72d87..7266d27db210 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -22,6 +22,12 @@
#include "trace.h"
+/* Grant v1 references per 4K page */
+#define GPP_V1 (PAGE_SIZE / sizeof(struct grant_entry_v1))
+
+/* Grant mappings per 4K page */
+#define MPP (PAGE_SIZE / sizeof(struct kvm_grant_map))
+
struct evtchnfd {
struct eventfd_ctx *ctx;
u32 vcpu;
@@ -1158,11 +1164,92 @@ int kvm_xen_gnttab_init(struct kvm *kvm, struct kvm_xen *xen,
void kvm_xen_gnttab_free(struct kvm_xen *xen)
{
struct kvm_grant_table *gnttab = &xen->gnttab;
+ int i;
+
+ /* Drop the page references taken by gfn_to_page() in the grow path. */
+ for (i = 0; i < gnttab->nr_frames; i++)
+ put_page(virt_to_page(gnttab->frames[i]));
+
+ /* NOTE(review): the maptrack pages allocated with get_zeroed_page() in
+ * kvm_xen_maptrack_grow() (gnttab->handle[j]) are not freed here; unless
+ * they are released elsewhere (not visible in this patch), this leaks
+ * one page per maptrack frame on VM teardown -- please confirm. */
kfree(gnttab->frames);
kfree(gnttab->frames_addr);
}
+/*
+ * Copy the userspace-seeded reserved grant entries (gnttab->initial) into
+ * the first grant frame once the guest has mapped it. Called from the grow
+ * path only for frame 0.
+ *
+ * Returns 0 on success, -EINVAL if no seed was provided or frame 0 has not
+ * been populated yet.
+ */
+int kvm_xen_gnttab_copy_initial_frame(struct kvm *kvm)
+{
+ struct kvm_grant_table *gnttab = &kvm->arch.xen.gnttab;
+ int idx = 0;
+
+ /* Only meant to copy the first gpa being populated */
+ if (!gnttab->initial_addr || !gnttab->frames[idx])
+ return -EINVAL;
+
+ memcpy(gnttab->frames[idx], gnttab->initial, PAGE_SIZE);
+ return 0;
+}
+
+/*
+ * Grow the maptrack so it can describe one mapping per grant entry, i.e.
+ * target * GPP_V1 entries in total, allocating one zeroed page per MPP
+ * additional entries.
+ *
+ * Returns 0 on success, -ENOMEM if an allocation failed part-way.
+ *
+ * NOTE(review): on ENOMEM the function still sets nr_frames = target even
+ * though the maptrack only partially grew -- callers then see a grant table
+ * larger than its maptrack coverage. Consider rolling back (or not
+ * advancing nr_frames) on failure.
+ * NOTE(review): no bounds check that j stays below max_mt_frames before
+ * writing gnttab.handle[j] -- presumably guaranteed by the caller's
+ * max_nr_frames check; please confirm.
+ */
+int kvm_xen_maptrack_grow(struct kvm_xen *xen, u32 target)
+{
+ u32 max_entries = target * GPP_V1;
+ u32 nr_entries = xen->gnttab.nr_mt_frames * MPP;
+ int i, j, err = 0;
+ void *addr;
+
+ /* i counts maptrack entries, j counts maptrack frames. */
+ for (i = nr_entries, j = xen->gnttab.nr_mt_frames;
+ i < max_entries; i += MPP, j++) {
+ addr = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!addr) {
+ err = -ENOMEM;
+ break;
+ }
+
+ xen->gnttab.handle[j] = addr;
+ }
+
+ xen->gnttab.nr_mt_frames = j;
+ xen->gnttab.nr_frames = target;
+ return err;
+}
+
+/*
+ * Grow the grant table by one frame: pin the guest page at op->grow.gfn,
+ * record it as frame op->grow.idx, seed frame 0 from the initial entries,
+ * and grow the maptrack to match. Invoked from the VMM when it handles the
+ * guest's XENMEM_add_to_physmap(XENMAPSPACE_grant_table).
+ *
+ * Returns 0 on success, -EINVAL for bad idx/gfn or a missing initial seed,
+ * -EFAULT if seeding or maptrack growth fails.
+ */
+int kvm_xen_gnttab_grow(struct kvm *kvm, struct kvm_xen_gnttab *op)
+{
+ struct kvm_xen *xen = &kvm->arch.xen;
+ struct kvm_grant_table *gnttab = &xen->gnttab;
+ gfn_t *map = gnttab->frames_addr;
+ u64 gfn = op->grow.gfn;
+ u32 idx = op->grow.idx;
+ struct page *page;
+
+ /* Tables only grow: refuse already-populated or out-of-range frames.
+  * NOTE(review): when nr_frames == 0, any idx > 0 below max_nr_frames
+  * passes this check, allowing frames to be added out of order with
+  * frame 0 still unpopulated -- intended? */
+ if (idx < gnttab->nr_frames || idx >= gnttab->max_nr_frames)
+ return -EINVAL;
+
+ /* The first frame must have been seeded from userspace beforehand. */
+ if (!idx && !gnttab->nr_frames &&
+ !gnttab->initial) {
+ return -EINVAL;
+ }
+
+ page = gfn_to_page(kvm, gfn);
+ if (is_error_page(page))
+ return -EINVAL;
+
+ map[idx] = gfn;
+
+ /* NOTE(review): page_to_virt() assumes the page is in the direct map
+  * (breaks with CONFIG_HIGHMEM); the reference taken by gfn_to_page()
+  * is only dropped in kvm_xen_gnttab_free(), and is leaked on the
+  * -EFAULT paths below, which also leave map[idx]/frames[idx] stale. */
+ gnttab->frames[idx] = page_to_virt(page);
+ if (!idx && !gnttab->nr_frames &&
+ kvm_xen_gnttab_copy_initial_frame(kvm)) {
+ pr_err("kvm_xen: dom%u: failed to copy initial frame\n",
+ xen->domid);
+ return -EFAULT;
+ }
+
+ /* Also bumps nr_frames to nr_frames + 1 as a side effect. */
+ if (kvm_xen_maptrack_grow(xen, gnttab->nr_frames + 1)) {
+ pr_warn("kvm_xen: dom%u: cannot grow maptrack\n", xen->domid);
+ return -EFAULT;
+ }
+
+ pr_debug("kvm_xen: dom%u: grant table grow frames:%d/%d\n", xen->domid,
+ gnttab->nr_frames, gnttab->max_nr_frames);
+ return 0;
+}
+
int kvm_vm_ioctl_xen_gnttab(struct kvm *kvm, struct kvm_xen_gnttab *op)
{
int r = -EINVAL;
@@ -1174,6 +1261,9 @@ int kvm_vm_ioctl_xen_gnttab(struct kvm *kvm, struct kvm_xen_gnttab *op)
case KVM_XEN_GNTTAB_F_INIT:
r = kvm_xen_gnttab_init(kvm, &kvm->arch.xen, op, 0);
break;
+ case KVM_XEN_GNTTAB_F_GROW:
+ r = kvm_xen_gnttab_grow(kvm, op);
+ break;
default:
r = -ENOSYS;
break;
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index e4fb9bc34d61..ff7f7d019472 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1505,6 +1505,7 @@ struct kvm_xen_hvm_attr {
} dom;
struct kvm_xen_gnttab {
#define KVM_XEN_GNTTAB_F_INIT 0
+#define KVM_XEN_GNTTAB_F_GROW (1 << 0)
__u32 flags;
union {
struct {
@@ -1512,6 +1513,10 @@ struct kvm_xen_hvm_attr {
__u32 max_maptrack_frames;
__u64 initial_frame;
} init;
+ struct {
+ __u32 idx;
+ __u64 gfn;
+ } grow;
__u32 padding[4];
};
} gnttab;
--
2.11.0
Powered by blists - more mailing lists