Message-ID: <20250213161426.102987-25-steven.price@arm.com>
Date: Thu, 13 Feb 2025 16:14:04 +0000
From: Steven Price <steven.price@....com>
To: kvm@...r.kernel.org,
kvmarm@...ts.linux.dev
Cc: Steven Price <steven.price@....com>,
Catalin Marinas <catalin.marinas@....com>,
Marc Zyngier <maz@...nel.org>,
Will Deacon <will@...nel.org>,
James Morse <james.morse@....com>,
Oliver Upton <oliver.upton@...ux.dev>,
Suzuki K Poulose <suzuki.poulose@....com>,
Zenghui Yu <yuzenghui@...wei.com>,
linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org,
Joey Gouly <joey.gouly@....com>,
Alexandru Elisei <alexandru.elisei@....com>,
Christoffer Dall <christoffer.dall@....com>,
Fuad Tabba <tabba@...gle.com>,
linux-coco@...ts.linux.dev,
Ganapatrao Kulkarni <gankulkarni@...amperecomputing.com>,
Gavin Shan <gshan@...hat.com>,
Shanker Donthineni <sdonthineni@...dia.com>,
Alper Gun <alpergun@...gle.com>,
"Aneesh Kumar K . V" <aneesh.kumar@...nel.org>
Subject: [PATCH v7 24/45] KVM: arm64: Handle Realm PSCI requests
The RMM needs to be informed of the target REC when a PSCI call is made
with an MPIDR argument. Expose an ioctl to userspace so that a VMM which
handles PSCI itself can pass this information on to the RMM.
Co-developed-by: Suzuki K Poulose <suzuki.poulose@....com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@....com>
Signed-off-by: Steven Price <steven.price@....com>
---
Changes since v6:
* Use vcpu_is_rec() rather than kvm_is_realm(vcpu->kvm).
* Minor renaming/formatting fixes.
---
arch/arm64/include/asm/kvm_rme.h | 3 +++
arch/arm64/kvm/arm.c | 25 +++++++++++++++++++++++++
arch/arm64/kvm/psci.c | 30 ++++++++++++++++++++++++++++++
arch/arm64/kvm/rme.c | 14 ++++++++++++++
4 files changed, 72 insertions(+)
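For reference, a minimal sketch (not part of the patch) of how a VMM that
handles PSCI in userspace might drive the new ioctl on the vCPU that made
the PSCI call. The ioctl and field names follow the handler below; the
exact uapi types and header are assumptions here, since the uapi
definition is introduced elsewhere in the series.

/* Illustrative sketch only -- uapi field types are assumed. */
#include <linux/kvm.h>
#include <linux/psci.h>
#include <sys/ioctl.h>

static int rmm_psci_complete(int source_vcpu_fd, unsigned long target_mpidr,
			     int psci_status)
{
	struct kvm_arm_rmm_psci_complete req = {
		.target_mpidr = target_mpidr,
		/* RMM v1.0 accepts only PSCI_RET_SUCCESS or PSCI_RET_DENIED */
		.psci_status = psci_status,
	};

	/* Issued on the source vCPU; the target is identified by MPIDR */
	return ioctl(source_vcpu_fd, KVM_ARM_VCPU_RMM_PSCI_COMPLETE, &req);
}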
diff --git a/arch/arm64/include/asm/kvm_rme.h b/arch/arm64/include/asm/kvm_rme.h
index 945927c70746..069a410a756f 100644
--- a/arch/arm64/include/asm/kvm_rme.h
+++ b/arch/arm64/include/asm/kvm_rme.h
@@ -109,6 +109,9 @@ int realm_map_non_secure(struct realm *realm,
kvm_pfn_t pfn,
unsigned long size,
struct kvm_mmu_memory_cache *memcache);
+int realm_psci_complete(struct kvm_vcpu *source,
+ struct kvm_vcpu *target,
+ unsigned long status);
static inline bool kvm_realm_is_private_address(struct realm *realm,
unsigned long addr)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 3e13e3d87ed9..a6718dec00c9 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1730,6 +1730,22 @@ static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
return __kvm_arm_vcpu_set_events(vcpu, events);
}
+static int kvm_arm_vcpu_rmm_psci_complete(struct kvm_vcpu *vcpu,
+ struct kvm_arm_rmm_psci_complete *arg)
+{
+ struct kvm_vcpu *target = kvm_mpidr_to_vcpu(vcpu->kvm, arg->target_mpidr);
+
+ if (!target)
+ return -EINVAL;
+
+ /*
+ * RMM v1.0 only supports PSCI_RET_SUCCESS or PSCI_RET_DENIED
+ * for the status, but leave the filtering to the RMM to keep
+ * this future proof.
+ */
+ return realm_psci_complete(vcpu, target, arg->psci_status);
+}
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -1852,6 +1868,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
return kvm_arm_vcpu_finalize(vcpu, what);
}
+ case KVM_ARM_VCPU_RMM_PSCI_COMPLETE: {
+ struct kvm_arm_rmm_psci_complete req;
+
+ if (!vcpu_is_rec(vcpu))
+ return -EPERM;
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+ return kvm_arm_vcpu_rmm_psci_complete(vcpu, &req);
+ }
default:
r = -EINVAL;
}
diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index 3b5dbe9a0a0e..a68f3c1878a5 100644
--- a/arch/arm64/kvm/psci.c
+++ b/arch/arm64/kvm/psci.c
@@ -103,6 +103,12 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
reset_state->reset = true;
kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
+ /*
+ * Make sure we issue PSCI_COMPLETE before the VCPU can be
+ * scheduled.
+ */
+ if (vcpu_is_rec(vcpu))
+ realm_psci_complete(source_vcpu, vcpu, PSCI_RET_SUCCESS);
/*
* Make sure the reset request is observed if the RUNNABLE mp_state is
@@ -115,6 +121,11 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
out_unlock:
spin_unlock(&vcpu->arch.mp_state_lock);
+ if (vcpu_is_rec(vcpu) && ret != PSCI_RET_SUCCESS) {
+ realm_psci_complete(source_vcpu, vcpu,
+ ret == PSCI_RET_ALREADY_ON ?
+ PSCI_RET_SUCCESS : PSCI_RET_DENIED);
+ }
return ret;
}
@@ -142,6 +153,25 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
/* Ignore other bits of target affinity */
target_affinity &= target_affinity_mask;
+ if (vcpu_is_rec(vcpu)) {
+ struct kvm_vcpu *target_vcpu;
+
+ /* RMM supports only zero affinity level */
+ if (lowest_affinity_level != 0)
+ return PSCI_RET_INVALID_PARAMS;
+
+ target_vcpu = kvm_mpidr_to_vcpu(kvm, target_affinity);
+ if (!target_vcpu)
+ return PSCI_RET_INVALID_PARAMS;
+
+ /*
+ * Provide the references of the source and target RECs to the
+ * RMM so that the RMM can complete the PSCI request.
+ */
+ realm_psci_complete(vcpu, target_vcpu, PSCI_RET_SUCCESS);
+ return PSCI_RET_SUCCESS;
+ }
+
/*
* If one or more VCPU matching target affinity are running
* then ON else OFF
diff --git a/arch/arm64/kvm/rme.c b/arch/arm64/kvm/rme.c
index 4b5bb7e49c4f..3a7d515c9e9f 100644
--- a/arch/arm64/kvm/rme.c
+++ b/arch/arm64/kvm/rme.c
@@ -145,6 +145,20 @@ static void free_delegated_granule(phys_addr_t phys)
free_page((unsigned long)phys_to_virt(phys));
}
+int realm_psci_complete(struct kvm_vcpu *source, struct kvm_vcpu *target,
+ unsigned long status)
+{
+ int ret;
+
+ ret = rmi_psci_complete(virt_to_phys(source->arch.rec.rec_page),
+ virt_to_phys(target->arch.rec.rec_page),
+ status);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
static int realm_rtt_create(struct realm *realm,
unsigned long addr,
int level,
--
2.43.0