Message-Id: <20220401143256.1950537-42-sashal@kernel.org>
Date: Fri, 1 Apr 2022 10:31:49 -0400
From: Sasha Levin <sashal@...nel.org>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org
Cc: Marc Zyngier <maz@...nel.org>,
Alexandru Elisei <alexandru.elisei@....com>,
Sasha Levin <sashal@...nel.org>, pbonzini@...hat.com,
corbet@....net, catalin.marinas@....com, will@...nel.org,
oupton@...gle.com, drjones@...hat.com, kvm@...r.kernel.org,
linux-doc@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
kvmarm@...ts.cs.columbia.edu
Subject: [PATCH AUTOSEL 5.16 042/109] KVM: arm64: Do not change the PMU event filter after a VCPU has run
From: Marc Zyngier <maz@...nel.org>
[ Upstream commit 5177fe91e4cf78a659aada2c9cf712db4d788481 ]

Userspace can specify which events a guest is allowed to use with the
KVM_ARM_VCPU_PMU_V3_FILTER attribute. The list of allowed events can be
identified by a guest from reading the PMCEID{0,1}_EL0 registers.
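
For context (not part of this patch), a minimal userspace sketch of how
the filter attribute is programmed, based on the attribute described in
Documentation/virt/kvm/devices/vcpu.rst; the helper name and event range
are made up, and vcpu_fd is assumed to come from KVM_CREATE_VCPU:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Hypothetical helper: deny 'n' PMU events starting at 'base' */
	static int deny_event_range(int vcpu_fd, __u16 base, __u16 n)
	{
		struct kvm_pmu_event_filter filter = {
			.base_event = base,
			.nevents    = n,
			.action     = KVM_PMU_EVENT_DENY,
		};
		struct kvm_device_attr attr = {
			.group = KVM_ARM_VCPU_PMU_V3_CTRL,
			.attr  = KVM_ARM_VCPU_PMU_V3_FILTER,
			.addr  = (__u64)(unsigned long)&filter,
		};

		/* KVM copies the filter out of attr.addr on this ioctl */
		return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
	}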
Changing the PMU event filter after a VCPU has run can cause reads of the
registers performed before the filter is changed to return different values
than reads performed with the new event filter in place. The architecture
defines the two registers as read-only, and this behaviour contradicts
that.

Keep track of when the first VCPU has run and deny subsequent changes to
the PMU event filter to prevent this from happening.
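
A hypothetical continuation of the sketch above (assuming <assert.h> and
<errno.h> are also included) illustrates the behaviour this patch
introduces:

	/* Any VCPU entering the guest freezes the filter for the VM */
	ioctl(vcpu_fd, KVM_RUN, 0);

	/* Further filter changes are now expected to fail with EBUSY */
	assert(deny_event_range(vcpu_fd, 0, 1) == -1 && errno == EBUSY);
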
Signed-off-by: Marc Zyngier <maz@...nel.org>
[ Alexandru E: Added commit message, updated ioctl documentation ]
Signed-off-by: Alexandru Elisei <alexandru.elisei@....com>
Signed-off-by: Marc Zyngier <maz@...nel.org>
Link: https://lore.kernel.org/r/20220127161759.53553-2-alexandru.elisei@arm.com
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
Documentation/virt/kvm/devices/vcpu.rst | 2 +-
arch/arm64/include/asm/kvm_host.h | 1 +
arch/arm64/kvm/arm.c | 4 +++
arch/arm64/kvm/pmu-emul.c | 33 +++++++++++++++----------
4 files changed, 26 insertions(+), 14 deletions(-)
diff --git a/Documentation/virt/kvm/devices/vcpu.rst b/Documentation/virt/kvm/devices/vcpu.rst
index 60a29972d3f1..d063aaee5bb7 100644
--- a/Documentation/virt/kvm/devices/vcpu.rst
+++ b/Documentation/virt/kvm/devices/vcpu.rst
@@ -70,7 +70,7 @@ irqchip.
-ENODEV PMUv3 not supported or GIC not initialized
-ENXIO PMUv3 not properly configured or in-kernel irqchip not
configured as required prior to calling this attribute
- -EBUSY PMUv3 already initialized
+ -EBUSY PMUv3 already initialized or a VCPU has already run
-EINVAL Invalid filter range
======= ======================================================
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index d016f27af6da..8c7ba346d713 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -137,6 +137,7 @@ struct kvm_arch {
/* Memory Tagging Extension enabled for the guest */
bool mte_enabled;
+ bool ran_once;
};
struct kvm_vcpu_fault_info {
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 1eadf9088880..6f3be4d44abe 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -629,6 +629,10 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
if (kvm_vm_is_protected(kvm))
kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
+ mutex_lock(&kvm->lock);
+ kvm->arch.ran_once = true;
+ mutex_unlock(&kvm->lock);
+
return ret;
}
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index a5e4bbf5e68f..c996fc562da4 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -921,6 +921,8 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
+ struct kvm *kvm = vcpu->kvm;
+
if (!kvm_vcpu_has_pmu(vcpu))
return -ENODEV;
@@ -938,7 +940,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
int __user *uaddr = (int __user *)(long)attr->addr;
int irq;
- if (!irqchip_in_kernel(vcpu->kvm))
+ if (!irqchip_in_kernel(kvm))
return -EINVAL;
if (get_user(irq, uaddr))
@@ -948,7 +950,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
return -EINVAL;
- if (!pmu_irq_is_valid(vcpu->kvm, irq))
+ if (!pmu_irq_is_valid(kvm, irq))
return -EINVAL;
if (kvm_arm_pmu_irq_initialized(vcpu))
@@ -963,7 +965,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
struct kvm_pmu_event_filter filter;
int nr_events;
- nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
+ nr_events = kvm_pmu_event_mask(kvm) + 1;
uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;
@@ -975,12 +977,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
filter.action != KVM_PMU_EVENT_DENY))
return -EINVAL;
- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&kvm->lock);
+
+ if (kvm->arch.ran_once) {
+ mutex_unlock(&kvm->lock);
+ return -EBUSY;
+ }
- if (!vcpu->kvm->arch.pmu_filter) {
- vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
- if (!vcpu->kvm->arch.pmu_filter) {
- mutex_unlock(&vcpu->kvm->lock);
+ if (!kvm->arch.pmu_filter) {
+ kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
+ if (!kvm->arch.pmu_filter) {
+ mutex_unlock(&kvm->lock);
return -ENOMEM;
}
@@ -991,17 +998,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
* events, the default is to allow.
*/
if (filter.action == KVM_PMU_EVENT_ALLOW)
- bitmap_zero(vcpu->kvm->arch.pmu_filter, nr_events);
+ bitmap_zero(kvm->arch.pmu_filter, nr_events);
else
- bitmap_fill(vcpu->kvm->arch.pmu_filter, nr_events);
+ bitmap_fill(kvm->arch.pmu_filter, nr_events);
}
if (filter.action == KVM_PMU_EVENT_ALLOW)
- bitmap_set(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents);
+ bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
else
- bitmap_clear(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents);
+ bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&kvm->lock);
return 0;
}
--
2.34.1