Message-ID: <20200116124626.1155-7-yezengruan@huawei.com>
Date: Thu, 16 Jan 2020 20:46:24 +0800
From: Zengruan Ye <yezengruan@...wei.com>
To: <linux-kernel@...r.kernel.org>,
<linux-arm-kernel@...ts.infradead.org>,
<kvmarm@...ts.cs.columbia.edu>, <kvm@...r.kernel.org>,
<linux-doc@...r.kernel.org>,
<virtualization@...ts.linux-foundation.org>
CC: <yezengruan@...wei.com>, <maz@...nel.org>, <james.morse@....com>,
<linux@...linux.org.uk>, <suzuki.poulose@....com>,
<julien.thierry.kdev@...il.com>, <catalin.marinas@....com>,
<mark.rutland@....com>, <will@...nel.org>, <steven.price@....com>,
<daniel.lezcano@...aro.org>, <wanghaibin.wang@...wei.com>,
<peterz@...radead.org>, <longman@...hat.com>
Subject: [PATCH v3 6/8] KVM: arm64: Provide vCPU attributes for PV lock

Allow user space to inform the KVM host where in the physical memory
map the paravirtualized lock structures should be located.

User space can set an attribute on the vCPU providing the IPA base
address of the PV lock structure for that vCPU. This must be
repeated for every vCPU in the VM.

The address is given in terms of the physical address visible to
the guest and must be 64-byte aligned. The guest will discover the
address via a hypercall.
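
For example, from user space (a sketch only: set_pvlock_ipa is an
illustrative helper, vcpu_fd comes from KVM_CREATE_VCPU, and the guest
PA must be 64-byte aligned and backed by a memslot):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int set_pvlock_ipa(int vcpu_fd, __u64 ipa)
	{
		struct kvm_device_attr attr = {
			.group	= KVM_ARM_VCPU_PVLOCK_CTRL,
			.attr	= KVM_ARM_VCPU_PVLOCK_IPA,
			/* the kernel get_user()s the u64 at this address */
			.addr	= (__u64)&ipa,
		};

		/* 0 on success; -1 with errno set (EEXIST if already set) */
		return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
	}
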
Signed-off-by: Zengruan Ye <yezengruan@...wei.com>
---
arch/arm64/include/asm/kvm_host.h | 9 +++++
arch/arm64/include/uapi/asm/kvm.h | 2 ++
arch/arm64/kvm/guest.c | 9 +++++
include/uapi/linux/kvm.h | 2 ++
virt/kvm/arm/pvlock.c | 57 +++++++++++++++++++++++++++++++
5 files changed, 79 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 10f8c4bbf97e..46409f31d652 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -520,6 +520,15 @@ static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
return (vcpu_arch->steal.base != GPA_INVALID);
}

+int kvm_arm_pvlock_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr);
+
+int kvm_arm_pvlock_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr);
+
+int kvm_arm_pvlock_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr);
+
static inline void kvm_arm_pvlock_preempted_init(struct kvm_vcpu_arch *vcpu_arch)
{
vcpu_arch->pv.base = GPA_INVALID;

diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 820e5751ada7..137d966b57c7 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -326,6 +326,8 @@ struct kvm_vcpu_events {
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
#define KVM_ARM_VCPU_PVTIME_CTRL 2
#define KVM_ARM_VCPU_PVTIME_IPA 0
+#define KVM_ARM_VCPU_PVLOCK_CTRL 3
+#define KVM_ARM_VCPU_PVLOCK_IPA 0

/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_VCPU2_SHIFT 28

diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 2fff06114a8f..6a5c12f3b08b 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -875,6 +875,9 @@ int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
case KVM_ARM_VCPU_PVTIME_CTRL:
ret = kvm_arm_pvtime_set_attr(vcpu, attr);
break;
+ case KVM_ARM_VCPU_PVLOCK_CTRL:
+ ret = kvm_arm_pvlock_set_attr(vcpu, attr);
+ break;
default:
ret = -ENXIO;
break;
@@ -898,6 +901,9 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
case KVM_ARM_VCPU_PVTIME_CTRL:
ret = kvm_arm_pvtime_get_attr(vcpu, attr);
break;
+ case KVM_ARM_VCPU_PVLOCK_CTRL:
+ ret = kvm_arm_pvlock_get_attr(vcpu, attr);
+ break;
default:
ret = -ENXIO;
break;
@@ -921,6 +927,9 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
case KVM_ARM_VCPU_PVTIME_CTRL:
ret = kvm_arm_pvtime_has_attr(vcpu, attr);
break;
+ case KVM_ARM_VCPU_PVLOCK_CTRL:
+ ret = kvm_arm_pvlock_has_attr(vcpu, attr);
+ break;
default:
ret = -ENXIO;
break;

diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index f0a16b4adbbd..bfc628c580d4 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1238,6 +1238,8 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_XIVE KVM_DEV_TYPE_XIVE
KVM_DEV_TYPE_ARM_PV_TIME,
#define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME
+ KVM_DEV_TYPE_ARM_PV_LOCK,
+#define KVM_DEV_TYPE_ARM_PV_LOCK KVM_DEV_TYPE_ARM_PV_LOCK
KVM_DEV_TYPE_MAX,
};

diff --git a/virt/kvm/arm/pvlock.c b/virt/kvm/arm/pvlock.c
index 0644b23be51e..5285257b6a35 100644
--- a/virt/kvm/arm/pvlock.c
+++ b/virt/kvm/arm/pvlock.c
@@ -43,3 +43,60 @@ void kvm_update_pvlock_preempted(struct kvm_vcpu *vcpu, u64 preempted)
kvm_put_guest(kvm, base + offset, preempted_le, u64);
srcu_read_unlock(&kvm->srcu, idx);
}
+
+int kvm_arm_pvlock_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ u64 __user *user = (u64 __user *)attr->addr;
+ struct kvm *kvm = vcpu->kvm;
+ u64 ipa;
+ int ret = 0;
+ int idx;
+
+ if (attr->attr != KVM_ARM_VCPU_PVLOCK_IPA)
+ return -ENXIO;
+
+ if (get_user(ipa, user))
+ return -EFAULT;
+ if (!IS_ALIGNED(ipa, 64))
+ return -EINVAL;
+ if (vcpu->arch.pv.base != GPA_INVALID)
+ return -EEXIST;
+
+ /* Check the address is in a valid memslot */
+ idx = srcu_read_lock(&kvm->srcu);
+ if (kvm_is_error_hva(gfn_to_hva(kvm, ipa >> PAGE_SHIFT)))
+ ret = -EINVAL;
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ if (!ret)
+ vcpu->arch.pv.base = ipa;
+
+ return ret;
+}
+
+int kvm_arm_pvlock_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ u64 __user *user = (u64 __user *)attr->addr;
+ u64 ipa;
+
+ if (attr->attr != KVM_ARM_VCPU_PVLOCK_IPA)
+ return -ENXIO;
+
+ ipa = vcpu->arch.pv.base;
+
+ if (put_user(ipa, user))
+ return -EFAULT;
+ return 0;
+}
+
+int kvm_arm_pvlock_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->attr) {
+ case KVM_ARM_VCPU_PVLOCK_IPA:
+ return 0;
+ }
+ return -ENXIO;
+}
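
For completeness, user space can probe and read the attribute back the
same way (again a sketch mirroring the PVTIME flow; vcpu_fd as above,
and ipa receives the kernel's GPA_INVALID sentinel while no base has
been set):

	__u64 ipa = 0;
	struct kvm_device_attr attr = {
		.group	= KVM_ARM_VCPU_PVLOCK_CTRL,
		.attr	= KVM_ARM_VCPU_PVLOCK_IPA,
		.addr	= (__u64)&ipa,
	};

	/* KVM_HAS_DEVICE_ATTR fails with errno ENXIO when unsupported */
	if (!ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr) &&
	    !ioctl(vcpu_fd, KVM_GET_DEVICE_ATTR, &attr)) {
		/* ipa now holds this vCPU's PV lock base address */
	}
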
--
2.19.1