Message-ID: <20240412065718.29105-16-yi-de.wu@mediatek.com>
Date: Fri, 12 Apr 2024 14:57:12 +0800
From: Yi-De Wu <yi-de.wu@...iatek.com>
To: Yingshiuan Pan <yingshiuan.pan@...iatek.com>, Ze-Yu Wang
<ze-yu.wang@...iatek.com>, Yi-De Wu <yi-de.wu@...iatek.com>, Rob Herring
<robh@...nel.org>, Krzysztof Kozlowski <krzk+dt@...nel.org>, Conor Dooley
<conor+dt@...nel.org>, Jonathan Corbet <corbet@....net>, Catalin Marinas
<catalin.marinas@....com>, Will Deacon <will@...nel.org>, Richard Cochran
<richardcochran@...il.com>, Matthias Brugger <matthias.bgg@...il.com>,
AngeloGioacchino Del Regno <angelogioacchino.delregno@...labora.com>
CC: <devicetree@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<linux-doc@...r.kernel.org>, <linux-arm-kernel@...ts.infradead.org>,
<netdev@...r.kernel.org>, <linux-mediatek@...ts.infradead.org>, David Brazdil
<dbrazdil@...gle.com>, Trilok Soni <quic_tsoni@...cinc.com>, My Chuang
<my.chuang@...iatek.com>, Shawn Hsiao <shawn.hsiao@...iatek.com>, PeiLun Suei
<peilun.suei@...iatek.com>, Liju Chen <liju-clr.chen@...iatek.com>, Willix
Yeh <chi-shen.yeh@...iatek.com>, Kevenny Hsieh <kevenny.hsieh@...iatek.com>
Subject: [PATCH v10 15/21] virt: geniezone: Add demand paging support
From: "Yingshiuan Pan" <yingshiuan.pan@...iatek.com>
This page fault handler helps the GenieZone hypervisor do demand paging.
On a lower-level translation fault, the GenieZone hypervisor first checks
that the faulting GPA (guest physical address, or IPA on ARM) is valid,
e.g. that it lies within a registered memory region, and then fills
vcpu_run->exit_reason with the information needed to return to the gzvm
driver.
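For GZVM_EXIT_EXCEPTION, the exit information shared with the driver has
roughly the following shape (condensed from the include/uapi/linux/gzvm.h
hunk below):

	struct {
		/* Which exception occurred, e.g. GZVM_EXCEPTION_PAGE_FAULT */
		__u32 exception;
		/* Exception error codes */
		__u32 error_code;
		/* Fault GPA (guest physical address or IPA in ARM) */
		__u64 fault_gpa;
		/* Zeroed by the hypervisor, pads the union to 256 bytes */
		__u64 reserved[30];
	} exception;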
With the fault information, the gzvm driver looks up the corresponding
physical address and issues MT_HVC_GZVM_MAP_GUEST to request that the
hypervisor map the found PA to the faulting GPA (IPA).
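A condensed sketch of that driver-side flow (simplified from the
gzvm_mmu.c hunk below; error paths trimmed):

	int gzvm_handle_page_fault(struct gzvm_vcpu *vcpu)
	{
		struct gzvm *vm = vcpu->gzvm;
		u64 gfn = PHYS_PFN(vcpu->run->exception.fault_gpa);
		int memslot_id = gzvm_find_memslot(vm, gfn);
		u64 pfn;

		if (memslot_id < 0 || vm->mem_alloc_mode == GZVM_FULLY_POPULATED)
			return -EFAULT;

		/* Pin/allocate the backing page in the host... */
		if (gzvm_vm_allocate_guest_page(&vm->memslot[memslot_id], gfn, &pfn))
			return -EFAULT;

		/* ...and ask the hypervisor to map the PA to the faulting GPA */
		return gzvm_arch_map_guest(vm->vm_id, memslot_id, pfn, gfn, 1);
	}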
There is one exception: for a protected VM, we populate the VM's entire
memory region in advance to improve performance.
Signed-off-by: Yingshiuan Pan <yingshiuan.pan@...iatek.com>
Signed-off-by: Jerry Wang <ze-yu.wang@...iatek.com>
Signed-off-by: kevenny hsieh <kevenny.hsieh@...iatek.com>
Signed-off-by: Liju Chen <liju-clr.chen@...iatek.com>
Signed-off-by: Yi-De Wu <yi-de.wu@...iatek.com>
---
arch/arm64/geniezone/gzvm_arch_common.h | 2 ++
arch/arm64/geniezone/vm.c | 13 +++++++
drivers/virt/geniezone/Makefile | 4 +--
drivers/virt/geniezone/gzvm_exception.c | 39 ++++++++++++++++++++
drivers/virt/geniezone/gzvm_main.c | 2 ++
drivers/virt/geniezone/gzvm_mmu.c | 41 +++++++++++++++++++++
drivers/virt/geniezone/gzvm_vcpu.c | 6 ++--
drivers/virt/geniezone/gzvm_vm.c | 48 ++++++++++++++++++++++++-
include/linux/soc/mediatek/gzvm_drv.h | 14 ++++++++
include/uapi/linux/gzvm.h | 13 +++++++
10 files changed, 177 insertions(+), 5 deletions(-)
create mode 100644 drivers/virt/geniezone/gzvm_exception.c
diff --git a/arch/arm64/geniezone/gzvm_arch_common.h b/arch/arm64/geniezone/gzvm_arch_common.h
index 4366618cdc0a..928191e3cdb2 100644
--- a/arch/arm64/geniezone/gzvm_arch_common.h
+++ b/arch/arm64/geniezone/gzvm_arch_common.h
@@ -24,6 +24,7 @@ enum {
GZVM_FUNC_INFORM_EXIT = 14,
GZVM_FUNC_MEMREGION_PURPOSE = 15,
GZVM_FUNC_SET_DTB_CONFIG = 16,
+ GZVM_FUNC_MAP_GUEST = 17,
NR_GZVM_FUNC,
};
@@ -48,6 +49,7 @@ enum {
#define MT_HVC_GZVM_INFORM_EXIT GZVM_HCALL_ID(GZVM_FUNC_INFORM_EXIT)
#define MT_HVC_GZVM_MEMREGION_PURPOSE GZVM_HCALL_ID(GZVM_FUNC_MEMREGION_PURPOSE)
#define MT_HVC_GZVM_SET_DTB_CONFIG GZVM_HCALL_ID(GZVM_FUNC_SET_DTB_CONFIG)
+#define MT_HVC_GZVM_MAP_GUEST GZVM_HCALL_ID(GZVM_FUNC_MAP_GUEST)
#define GIC_V3_NR_LRS 16
diff --git a/arch/arm64/geniezone/vm.c b/arch/arm64/geniezone/vm.c
index cbebae3ff663..3cd24408f880 100644
--- a/arch/arm64/geniezone/vm.c
+++ b/arch/arm64/geniezone/vm.c
@@ -367,12 +367,16 @@ int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
struct gzvm_enable_cap *cap,
void __user *argp)
{
+ struct arm_smccc_res res = {0};
int ret;
switch (cap->cap) {
case GZVM_CAP_PROTECTED_VM:
ret = gzvm_vm_ioctl_cap_pvm(gzvm, cap, argp);
return ret;
+ case GZVM_CAP_ENABLE_DEMAND_PAGING:
+ ret = gzvm_vm_arch_enable_cap(gzvm, cap, &res);
+ return ret;
default:
break;
}
@@ -404,3 +408,12 @@ u64 gzvm_hva_to_pa_arch(u64 hva)
return GZVM_PA_ERR_BAD;
return par;
}
+
+int gzvm_arch_map_guest(u16 vm_id, int memslot_id, u64 pfn, u64 gfn,
+ u64 nr_pages)
+{
+ struct arm_smccc_res res;
+
+ return gzvm_hypcall_wrapper(MT_HVC_GZVM_MAP_GUEST, vm_id, memslot_id,
+ pfn, gfn, nr_pages, 0, 0, &res);
+}
diff --git a/drivers/virt/geniezone/Makefile b/drivers/virt/geniezone/Makefile
index 9956f4891df2..2e12870637d5 100644
--- a/drivers/virt/geniezone/Makefile
+++ b/drivers/virt/geniezone/Makefile
@@ -8,5 +8,5 @@ GZVM_DIR ?= ../../../drivers/virt/geniezone
gzvm-y := $(GZVM_DIR)/gzvm_main.o $(GZVM_DIR)/gzvm_vm.o \
$(GZVM_DIR)/gzvm_mmu.o $(GZVM_DIR)/gzvm_vcpu.o \
- $(GZVM_DIR)/gzvm_irqfd.o $(GZVM_DIR)/gzvm_ioeventfd.o
-
+ $(GZVM_DIR)/gzvm_irqfd.o $(GZVM_DIR)/gzvm_ioeventfd.o \
+ $(GZVM_DIR)/gzvm_exception.o
diff --git a/drivers/virt/geniezone/gzvm_exception.c b/drivers/virt/geniezone/gzvm_exception.c
new file mode 100644
index 000000000000..475bc15b0689
--- /dev/null
+++ b/drivers/virt/geniezone/gzvm_exception.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/soc/mediatek/gzvm_drv.h>
+
+/**
+ * gzvm_handle_guest_exception() - Handle guest exception
+ * @vcpu: Pointer to struct gzvm_vcpu
+ * Return:
+ * * true - This exception has been processed, no need to return to VMM.
+ * * false - This exception has not been processed; userspace must handle it.
+ */
+bool gzvm_handle_guest_exception(struct gzvm_vcpu *vcpu)
+{
+ int ret;
+
+ for (int i = 0; i < ARRAY_SIZE(vcpu->run->exception.reserved); i++) {
+ if (vcpu->run->exception.reserved[i])
+ return false;
+ }
+
+ switch (vcpu->run->exception.exception) {
+ case GZVM_EXCEPTION_PAGE_FAULT:
+ ret = gzvm_handle_page_fault(vcpu);
+ break;
+ case GZVM_EXCEPTION_UNKNOWN:
+ fallthrough;
+ default:
+ ret = -EFAULT;
+ }
+
+ if (!ret)
+ return true;
+ else
+ return false;
+}
diff --git a/drivers/virt/geniezone/gzvm_main.c b/drivers/virt/geniezone/gzvm_main.c
index 75f643222b91..8f11a27f2723 100644
--- a/drivers/virt/geniezone/gzvm_main.c
+++ b/drivers/virt/geniezone/gzvm_main.c
@@ -28,6 +28,8 @@ int gzvm_err_to_errno(unsigned long err)
return 0;
case ERR_NO_MEMORY:
return -ENOMEM;
+ case ERR_INVALID_ARGS:
+ return -EINVAL;
case ERR_NOT_SUPPORTED:
fallthrough;
case ERR_NOT_IMPLEMENTED:
diff --git a/drivers/virt/geniezone/gzvm_mmu.c b/drivers/virt/geniezone/gzvm_mmu.c
index 3f1272f0e22d..3f7657544c30 100644
--- a/drivers/virt/geniezone/gzvm_mmu.c
+++ b/drivers/virt/geniezone/gzvm_mmu.c
@@ -115,3 +115,44 @@ int gzvm_vm_allocate_guest_page(struct gzvm_memslot *slot, u64 gfn, u64 *pfn)
return 0;
}
+static int handle_single_demand_page(struct gzvm *vm, int memslot_id, u64 gfn)
+{
+ int ret;
+ u64 pfn;
+
+ ret = gzvm_vm_allocate_guest_page(&vm->memslot[memslot_id], gfn, &pfn);
+ if (unlikely(ret))
+ return -EFAULT;
+
+ ret = gzvm_arch_map_guest(vm->vm_id, memslot_id, pfn, gfn, 1);
+ if (unlikely(ret))
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * gzvm_handle_page_fault() - Handle guest page fault, find corresponding page
+ * for the faulting GPA
+ * @vcpu: Pointer to struct gzvm_vcpu of the faulting vcpu
+ *
+ * Return:
+ * * 0 - Success to handle guest page fault
+ * * -EFAULT - Failed to map phys addr to guest's GPA
+ */
+int gzvm_handle_page_fault(struct gzvm_vcpu *vcpu)
+{
+ struct gzvm *vm = vcpu->gzvm;
+ int memslot_id;
+ u64 gfn;
+
+ gfn = PHYS_PFN(vcpu->run->exception.fault_gpa);
+ memslot_id = gzvm_find_memslot(vm, gfn);
+ if (unlikely(memslot_id < 0))
+ return -EFAULT;
+
+ if (unlikely(vm->mem_alloc_mode == GZVM_FULLY_POPULATED))
+ return -EFAULT;
+
+ return handle_single_demand_page(vm, memslot_id, gfn);
+}
diff --git a/drivers/virt/geniezone/gzvm_vcpu.c b/drivers/virt/geniezone/gzvm_vcpu.c
index 388d25e1183b..e8d6f32f325c 100644
--- a/drivers/virt/geniezone/gzvm_vcpu.c
+++ b/drivers/virt/geniezone/gzvm_vcpu.c
@@ -113,9 +113,11 @@ static long gzvm_vcpu_run(struct gzvm_vcpu *vcpu, void __user *argp)
* it's geniezone's responsibility to fill corresponding data
* structure
*/
- case GZVM_EXIT_HYPERCALL:
- fallthrough;
case GZVM_EXIT_EXCEPTION:
+ if (!gzvm_handle_guest_exception(vcpu))
+ need_userspace = true;
+ break;
+ case GZVM_EXIT_HYPERCALL:
fallthrough;
case GZVM_EXIT_DEBUG:
fallthrough;
diff --git a/drivers/virt/geniezone/gzvm_vm.c b/drivers/virt/geniezone/gzvm_vm.c
index 1fc915b790b8..fc6e58008b92 100644
--- a/drivers/virt/geniezone/gzvm_vm.c
+++ b/drivers/virt/geniezone/gzvm_vm.c
@@ -29,6 +29,31 @@ int gzvm_gfn_to_hva_memslot(struct gzvm_memslot *memslot, u64 gfn,
return 0;
}
+/**
+ * gzvm_find_memslot() - Find memslot containing this @gfn
+ * @vm: Pointer to struct gzvm
+ * @gfn: Guest frame number
+ *
+ * Return:
+ * * >=0 - Index of memslot
+ * * -EFAULT - Not found
+ */
+int gzvm_find_memslot(struct gzvm *vm, u64 gfn)
+{
+ int i;
+
+ for (i = 0; i < GZVM_MAX_MEM_REGION; i++) {
+ if (vm->memslot[i].npages == 0)
+ continue;
+
+ if (gfn >= vm->memslot[i].base_gfn &&
+ gfn < vm->memslot[i].base_gfn + vm->memslot[i].npages)
+ return i;
+ }
+
+ return -EFAULT;
+}
+
/**
* register_memslot_addr_range() - Register memory region to GenieZone
* @gzvm: Pointer to struct gzvm
@@ -60,7 +85,10 @@ register_memslot_addr_range(struct gzvm *gzvm, struct gzvm_memslot *memslot)
}
free_pages_exact(region, buf_size);
- return 0;
+
+ if (gzvm->mem_alloc_mode == GZVM_DEMAND_PAGING)
+ return 0;
+ return gzvm_vm_populate_mem_region(gzvm, memslot->slot_id);
}
/**
@@ -304,6 +332,22 @@ static const struct file_operations gzvm_vm_fops = {
.llseek = noop_llseek,
};
+static int setup_mem_alloc_mode(struct gzvm *vm)
+{
+ int ret;
+ struct gzvm_enable_cap cap = {0};
+
+ cap.cap = GZVM_CAP_ENABLE_DEMAND_PAGING;
+
+ ret = gzvm_vm_ioctl_enable_cap(vm, &cap, NULL);
+ if (!ret)
+ vm->mem_alloc_mode = GZVM_DEMAND_PAGING;
+ else
+ vm->mem_alloc_mode = GZVM_FULLY_POPULATED;
+
+ return 0;
+}
+
static struct gzvm *gzvm_create_vm(unsigned long vm_type)
{
int ret;
@@ -337,6 +381,8 @@ static struct gzvm *gzvm_create_vm(unsigned long vm_type)
return ERR_PTR(ret);
}
+ setup_mem_alloc_mode(gzvm);
+
mutex_lock(&gzvm_list_lock);
list_add(&gzvm->vm_list, &gzvm_list);
mutex_unlock(&gzvm_list_lock);
diff --git a/include/linux/soc/mediatek/gzvm_drv.h b/include/linux/soc/mediatek/gzvm_drv.h
index 798880468991..7ca4ae0de482 100644
--- a/include/linux/soc/mediatek/gzvm_drv.h
+++ b/include/linux/soc/mediatek/gzvm_drv.h
@@ -29,6 +29,7 @@
*/
#define NO_ERROR (0)
#define ERR_NO_MEMORY (-5)
+#define ERR_INVALID_ARGS (-8)
#define ERR_NOT_SUPPORTED (-24)
#define ERR_NOT_IMPLEMENTED (-27)
#define ERR_FAULT (-40)
@@ -43,6 +44,11 @@
#define GZVM_VCPU_RUN_MAP_SIZE (PAGE_SIZE * 2)
+enum gzvm_demand_paging_mode {
+ GZVM_FULLY_POPULATED = 0,
+ GZVM_DEMAND_PAGING = 1,
+};
+
/**
* struct mem_region_addr_range: identical to ffa memory constituent
* @address: the base IPA of the constituent memory region, aligned to 4 kiB
@@ -105,6 +111,7 @@ struct gzvm_vcpu {
* @irq_ack_notifier_list: list head for irq ack notifier
* @irq_srcu: structure data for SRCU(sleepable rcu)
* @irq_lock: lock for irq injection
+ * @mem_alloc_mode: memory allocation mode - fully populated or demand paging
*/
struct gzvm {
struct gzvm_vcpu *vcpus[GZVM_MAX_VCPUS];
@@ -127,6 +134,7 @@ struct gzvm {
struct hlist_head irq_ack_notifier_list;
struct srcu_struct irq_srcu;
struct mutex irq_lock;
+ u32 mem_alloc_mode;
};
long gzvm_dev_ioctl_check_extension(struct gzvm *gzvm, unsigned long args);
@@ -145,6 +153,8 @@ int gzvm_arch_set_memregion(u16 vm_id, size_t buf_size,
int gzvm_arch_check_extension(struct gzvm *gzvm, __u64 cap, void __user *argp);
int gzvm_arch_create_vm(unsigned long vm_type);
int gzvm_arch_destroy_vm(u16 vm_id);
+int gzvm_arch_map_guest(u16 vm_id, int memslot_id, u64 pfn, u64 gfn,
+ u64 nr_pages);
int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
struct gzvm_enable_cap *cap,
void __user *argp);
@@ -166,6 +176,10 @@ int gzvm_arch_vcpu_run(struct gzvm_vcpu *vcpu, __u64 *exit_reason);
int gzvm_arch_destroy_vcpu(u16 vm_id, int vcpuid);
int gzvm_arch_inform_exit(u16 vm_id);
+int gzvm_find_memslot(struct gzvm *vm, u64 gfn);
+int gzvm_handle_page_fault(struct gzvm_vcpu *vcpu);
+bool gzvm_handle_guest_exception(struct gzvm_vcpu *vcpu);
+
int gzvm_arch_create_device(u16 vm_id, struct gzvm_create_device *gzvm_dev);
int gzvm_arch_inject_irq(struct gzvm *gzvm, unsigned int vcpu_idx,
u32 irq, bool level);
diff --git a/include/uapi/linux/gzvm.h b/include/uapi/linux/gzvm.h
index 7aec4adf2206..61a7a87b3d23 100644
--- a/include/uapi/linux/gzvm.h
+++ b/include/uapi/linux/gzvm.h
@@ -18,6 +18,7 @@
#define GZVM_CAP_VM_GPA_SIZE 0xa5
#define GZVM_CAP_PROTECTED_VM 0xffbadab1
+#define GZVM_CAP_ENABLE_DEMAND_PAGING 0x9202
/* sub-commands put in args[0] for GZVM_CAP_PROTECTED_VM */
#define GZVM_CAP_PVM_SET_PVMFW_GPA 0
@@ -186,6 +187,12 @@ enum {
GZVM_EXIT_GZ = 0x9292000a,
};
+/* exception definitions of GZVM_EXIT_EXCEPTION */
+enum {
+ GZVM_EXCEPTION_UNKNOWN = 0x0,
+ GZVM_EXCEPTION_PAGE_FAULT = 0x1,
+};
+
/**
* struct gzvm_vcpu_run: Same purpose as kvm_run, this struct is
* shared between userspace, kernel and
@@ -250,6 +257,12 @@ struct gzvm_vcpu_run {
__u32 exception;
/* Exception error codes */
__u32 error_code;
+ /* Fault GPA (guest physical address or IPA in ARM) */
+ __u64 fault_gpa;
+ /* Reserved for future use and zeroed by the hypervisor.
+ * Pads the union up to its full size, 256 bytes.
+ */
+ __u64 reserved[30];
} exception;
/* GZVM_EXIT_HYPERCALL */
struct {
--
2.18.0