Message-ID: <20240501142712.GB484338@myrica>
Date: Wed, 1 May 2024 15:27:12 +0100
From: Jean-Philippe Brucker <jean-philippe@...aro.org>
To: Steven Price <steven.price@....com>
Cc: kvm@...r.kernel.org, kvmarm@...ts.linux.dev,
Catalin Marinas <catalin.marinas@....com>,
Marc Zyngier <maz@...nel.org>, Will Deacon <will@...nel.org>,
James Morse <james.morse@....com>,
Oliver Upton <oliver.upton@...ux.dev>,
Suzuki K Poulose <suzuki.poulose@....com>,
Zenghui Yu <yuzenghui@...wei.com>,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
Joey Gouly <joey.gouly@....com>,
Alexandru Elisei <alexandru.elisei@....com>,
Christoffer Dall <christoffer.dall@....com>,
Fuad Tabba <tabba@...gle.com>, linux-coco@...ts.linux.dev,
Ganapatrao Kulkarni <gankulkarni@...amperecomputing.com>
Subject: Re: [PATCH v2 17/43] arm64: RME: Allow VMM to set RIPAS
On Fri, Apr 12, 2024 at 09:42:43AM +0100, Steven Price wrote:
> +static inline bool realm_is_addr_protected(struct realm *realm,
> +					   unsigned long addr)
> +{
> +	unsigned int ia_bits = realm->ia_bits;
> +
> +	return !(addr & ~(BIT(ia_bits - 1) - 1));
Is it enough to return !(addr & BIT(realm->ia_bits - 1))?
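i.e. something like this (untested, and assuming callers never pass an
address with bits set above ia_bits, since the shared bit is the top
IPA bit):

	static inline bool realm_is_addr_protected(struct realm *realm,
						   unsigned long addr)
	{
		return !(addr & BIT(realm->ia_bits - 1));
	}
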
> +static void realm_unmap_range_shared(struct kvm *kvm,
> +				     int level,
> +				     unsigned long start,
> +				     unsigned long end)
> +{
> +	struct realm *realm = &kvm->arch.realm;
> +	unsigned long rd = virt_to_phys(realm->rd);
> +	ssize_t map_size = rme_rtt_level_mapsize(level);
> +	unsigned long next_addr, addr;
> +	unsigned long shared_bit = BIT(realm->ia_bits - 1);
> +
> +	if (WARN_ON(level > RME_RTT_MAX_LEVEL))
> +		return;
> +
> +	start |= shared_bit;
> +	end |= shared_bit;
> +
> +	for (addr = start; addr < end; addr = next_addr) {
> +		unsigned long align_addr = ALIGN(addr, map_size);
> +		int ret;
> +
> +		next_addr = ALIGN(addr + 1, map_size);
> +
> +		if (align_addr != addr || next_addr > end) {
> +			/* Need to recurse deeper */
> +			if (addr < align_addr)
> +				next_addr = align_addr;
> +			realm_unmap_range_shared(kvm, level + 1, addr,
> +						 min(next_addr, end));
> +			continue;
> +		}
> +
> +		ret = rmi_rtt_unmap_unprotected(rd, addr, level, &next_addr);
> +		switch (RMI_RETURN_STATUS(ret)) {
> +		case RMI_SUCCESS:
> +			break;
> +		case RMI_ERROR_RTT:
> +			if (next_addr == addr) {
> +				next_addr = ALIGN(addr + 1, map_size);
> +				realm_unmap_range_shared(kvm, level + 1, addr,
> +							 next_addr);
> +			}
> +			break;
> +		default:
> +			WARN_ON(1);
In this case we also need to return, because the RMM returns with
next_addr == 0, causing an infinite loop. At the moment a VMM can trigger
this easily by creating a guest memfd before creating an RD, see below.
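e.g. (untested):

		default:
			WARN_ON(1);
			return;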
> +		}
> +	}
> +}
> +
> +static void realm_unmap_range_private(struct kvm *kvm,
> +				      unsigned long start,
> +				      unsigned long end)
> +{
> +	struct realm *realm = &kvm->arch.realm;
> +	ssize_t map_size = RME_PAGE_SIZE;
> +	unsigned long next_addr, addr;
> +
> +	for (addr = start; addr < end; addr = next_addr) {
> +		int ret;
> +
> +		next_addr = ALIGN(addr + 1, map_size);
> +
> +		ret = realm_destroy_protected(realm, addr, &next_addr);
> +
> +		if (WARN_ON(ret))
> +			break;
> +	}
> +}
> +
> +static void realm_unmap_range(struct kvm *kvm,
> +			      unsigned long start,
> +			      unsigned long end,
> +			      bool unmap_private)
> +{
Should this check for a valid kvm->arch.realm.rd, or a valid realm state?
I'm not sure what the best place is, but none of the RMM calls will
succeed if the RD is NULL, causing some WARNs. I can trigger this with
set_memory_attributes() ioctls before creating an RD, for example.
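Something like the check below at the top of realm_unmap_range() would
avoid it (untested; a realm state check may be more appropriate than
testing the pointer directly):

	if (!kvm->arch.realm.rd)
		return;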
> +	realm_unmap_range_shared(kvm, RME_RTT_MAX_LEVEL - 1, start, end);
> +	if (unmap_private)
> +		realm_unmap_range_private(kvm, start, end);
> +}
> +
> u32 kvm_realm_ipa_limit(void)
> {
> 	return u64_get_bits(rmm_feat_reg0, RMI_FEATURE_REGISTER_0_S2SZ);
> @@ -190,6 +341,30 @@ static int realm_rtt_destroy(struct realm *realm, unsigned long addr,
> 	return ret;
> }
>
> +static int realm_create_rtt_levels(struct realm *realm,
> +				   unsigned long ipa,
> +				   int level,
> +				   int max_level,
> +				   struct kvm_mmu_memory_cache *mc)
> +{
> +	if (WARN_ON(level == max_level))
> +		return 0;
> +
> +	while (level++ < max_level) {
> +		phys_addr_t rtt = alloc_delegated_page(realm, mc);
> +
> +		if (rtt == PHYS_ADDR_MAX)
> +			return -ENOMEM;
> +
> +		if (realm_rtt_create(realm, ipa, level, rtt)) {
> +			free_delegated_page(realm, rtt);
> +			return -ENXIO;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> static int realm_tear_down_rtt_level(struct realm *realm, int level,
> 				     unsigned long start, unsigned long end)
> {
> @@ -265,6 +440,68 @@ static int realm_tear_down_rtt_range(struct realm *realm,
> 					 start, end);
> }
>
> +/*
> + * Returns 0 on successful fold, a negative value on error, a positive value if
> + * we were not able to fold all tables at this level.
> + */
> +static int realm_fold_rtt_level(struct realm *realm, int level,
> +				unsigned long start, unsigned long end)
> +{
> +	int not_folded = 0;
> +	ssize_t map_size;
> +	unsigned long addr, next_addr;
> +
> +	if (WARN_ON(level > RME_RTT_MAX_LEVEL))
> +		return -EINVAL;
> +
> +	map_size = rme_rtt_level_mapsize(level - 1);
> +
> +	for (addr = start; addr < end; addr = next_addr) {
> +		phys_addr_t rtt_granule;
> +		int ret;
> +		unsigned long align_addr = ALIGN(addr, map_size);
> +
> +		next_addr = ALIGN(addr + 1, map_size);
> +
> +		ret = realm_rtt_fold(realm, align_addr, level, &rtt_granule);
> +
> +		switch (RMI_RETURN_STATUS(ret)) {
> +		case RMI_SUCCESS:
> +			if (!WARN_ON(rmi_granule_undelegate(rtt_granule)))
> +				free_page((unsigned long)phys_to_virt(rtt_granule));
> +			break;
> +		case RMI_ERROR_RTT:
> +			if (level == RME_RTT_MAX_LEVEL ||
> +			    RMI_RETURN_INDEX(ret) < level) {
> +				not_folded++;
> +				break;
> +			}
> +			/* Recurse a level deeper */
> +			ret = realm_fold_rtt_level(realm,
> +						   level + 1,
> +						   addr,
> +						   next_addr);
> +			if (ret < 0)
> +				return ret;
> +			else if (ret == 0)
> +				/* Try again at this level */
> +				next_addr = addr;
> +			break;
> +		default:
Maybe this also deserves a WARN() to be consistent with the other RMI
calls.
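i.e. (untested):

		default:
			WARN_ON(1);
			return -ENXIO;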
Thanks,
Jean
> +			return -ENXIO;
> +		}
> +	}
> +
> +	return not_folded;
> +}