Message-ID: <yq5a5xphmv4n.fsf@kernel.org>
Date: Thu, 24 Oct 2024 20:00:00 +0530
From: Aneesh Kumar K.V <aneesh.kumar@...nel.org>
To: Steven Price <steven.price@....com>, kvm@...r.kernel.org,
kvmarm@...ts.linux.dev
Cc: Steven Price <steven.price@....com>,
Catalin Marinas <catalin.marinas@....com>,
Marc Zyngier <maz@...nel.org>, Will Deacon <will@...nel.org>,
James Morse <james.morse@....com>,
Oliver Upton <oliver.upton@...ux.dev>,
Suzuki K Poulose <suzuki.poulose@....com>,
Zenghui Yu <yuzenghui@...wei.com>,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
Joey Gouly <joey.gouly@....com>,
Alexandru Elisei <alexandru.elisei@....com>,
Christoffer Dall <christoffer.dall@....com>,
Fuad Tabba <tabba@...gle.com>, linux-coco@...ts.linux.dev,
Ganapatrao Kulkarni <gankulkarni@...amperecomputing.com>,
Gavin Shan <gshan@...hat.com>,
Shanker Donthineni <sdonthineni@...dia.com>,
Alper Gun <alpergun@...gle.com>
Subject: Re: [PATCH v5 21/43] arm64: RME: Runtime faulting of memory
Steven Price <steven.price@....com> writes:
> +static int realm_map_ipa(struct kvm *kvm, phys_addr_t ipa,
> +			 kvm_pfn_t pfn, unsigned long map_size,
> +			 enum kvm_pgtable_prot prot,
> +			 struct kvm_mmu_memory_cache *memcache)
> +{
> +	struct realm *realm = &kvm->arch.realm;
> +	struct page *page = pfn_to_page(pfn);
> +
> +	if (WARN_ON(!(prot & KVM_PGTABLE_PROT_W)))
> +		return -EFAULT;
> +
> +	if (!realm_is_addr_protected(realm, ipa))
> +		return realm_map_non_secure(realm, ipa, page, map_size,
> +					    memcache);
> +
> +	return realm_map_protected(realm, ipa, page, map_size, memcache);
> +}
> +
Some of these pfn_to_page(pfn) conversions can be avoided, because the
callers essentially only need the physical address (they just convert the
page straight back with page_to_phys()). Passing the pfn through also
makes it clearer whether we are operating on a compound page or not.
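
To spell out the idea (illustration only, not part of the diff below;
example_pfn_to_phys() is just a made-up name for this sketch):

/* Illustration: the struct page round trip only recovers the phys address */
static inline phys_addr_t example_pfn_to_phys(kvm_pfn_t pfn)
{
	/* page_to_phys(pfn_to_page(pfn)) would return the same value */
	return __pfn_to_phys(pfn);
}

With the struct page gone, iterating with pfn++ in populate_par_region()
is equivalent to the old page++ over the range being mapped.
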
Something like below?
diff --git a/arch/arm64/include/asm/kvm_rme.h b/arch/arm64/include/asm/kvm_rme.h
index cd42c19ca21d..bf5702c8dbee 100644
--- a/arch/arm64/include/asm/kvm_rme.h
+++ b/arch/arm64/include/asm/kvm_rme.h
@@ -110,13 +110,13 @@ void kvm_realm_unmap_range(struct kvm *kvm,
 			   bool unmap_private);
 int realm_map_protected(struct realm *realm,
 			unsigned long base_ipa,
-			struct page *dst_page,
-			unsigned long map_size,
+			kvm_pfn_t pfn,
+			unsigned long size,
 			struct kvm_mmu_memory_cache *memcache);
 int realm_map_non_secure(struct realm *realm,
 			 unsigned long ipa,
-			 struct page *page,
-			 unsigned long map_size,
+			 kvm_pfn_t pfn,
+			 unsigned long size,
 			 struct kvm_mmu_memory_cache *memcache);
 int realm_set_ipa_state(struct kvm_vcpu *vcpu,
 			unsigned long addr, unsigned long end,
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 569f63695bef..254e90c014cf 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1452,16 +1452,15 @@ static int realm_map_ipa(struct kvm *kvm, phys_addr_t ipa,
 			 struct kvm_mmu_memory_cache *memcache)
 {
 	struct realm *realm = &kvm->arch.realm;
-	struct page *page = pfn_to_page(pfn);
 
 	if (WARN_ON(!(prot & KVM_PGTABLE_PROT_W)))
 		return -EFAULT;
 
 	if (!realm_is_addr_protected(realm, ipa))
-		return realm_map_non_secure(realm, ipa, page, map_size,
+		return realm_map_non_secure(realm, ipa, pfn, map_size,
 					    memcache);
 
-	return realm_map_protected(realm, ipa, page, map_size, memcache);
+	return realm_map_protected(realm, ipa, pfn, map_size, memcache);
 }
 
 static int private_memslot_fault(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/kvm/rme.c b/arch/arm64/kvm/rme.c
index 4064a2ce5c64..953d5cdf7ead 100644
--- a/arch/arm64/kvm/rme.c
+++ b/arch/arm64/kvm/rme.c
@@ -676,15 +676,15 @@ void kvm_realm_unmap_range(struct kvm *kvm, unsigned long ipa, u64 size,
 
 static int realm_create_protected_data_page(struct realm *realm,
 					     unsigned long ipa,
-					     struct page *dst_page,
-					     struct page *src_page,
+					     kvm_pfn_t dst_pfn,
+					     kvm_pfn_t src_pfn,
 					     unsigned long flags)
 {
 	phys_addr_t dst_phys, src_phys;
 	int ret;
 
-	dst_phys = page_to_phys(dst_page);
-	src_phys = page_to_phys(src_page);
+	dst_phys = __pfn_to_phys(dst_pfn);
+	src_phys = __pfn_to_phys(src_pfn);
 
 	if (rmi_granule_delegate(dst_phys))
 		return -ENXIO;
@@ -711,7 +711,7 @@ static int realm_create_protected_data_page(struct realm *realm,
 err:
 	if (WARN_ON(rmi_granule_undelegate(dst_phys))) {
 		/* Page can't be returned to NS world so is lost */
-		get_page(dst_page);
+		get_page(pfn_to_page(dst_pfn));
 	}
 	return -ENXIO;
 }
@@ -741,15 +741,14 @@ static phys_addr_t rtt_get_phys(struct realm *realm, struct rtt_entry *rtt)
 }
 
 int realm_map_protected(struct realm *realm,
-			unsigned long base_ipa,
-			struct page *dst_page,
+			unsigned long ipa,
+			kvm_pfn_t pfn,
 			unsigned long map_size,
 			struct kvm_mmu_memory_cache *memcache)
 {
-	phys_addr_t dst_phys = page_to_phys(dst_page);
+	phys_addr_t phys = __pfn_to_phys(pfn);
 	phys_addr_t rd = virt_to_phys(realm->rd);
-	unsigned long phys = dst_phys;
-	unsigned long ipa = base_ipa;
+	unsigned long base_ipa = ipa;
 	unsigned long size;
 	int map_level;
 	int ret = 0;
@@ -860,14 +859,14 @@ int realm_map_protected(struct realm *realm,
 
 int realm_map_non_secure(struct realm *realm,
 			 unsigned long ipa,
-			 struct page *page,
+			 kvm_pfn_t pfn,
 			 unsigned long map_size,
 			 struct kvm_mmu_memory_cache *memcache)
 {
 	phys_addr_t rd = virt_to_phys(realm->rd);
 	int map_level;
 	int ret = 0;
-	unsigned long desc = page_to_phys(page) |
+	unsigned long desc = __pfn_to_phys(pfn) |
 			     PTE_S2_MEMATTR(MT_S2_FWB_NORMAL) |
 			     /* FIXME: Read+Write permissions for now */
 			     (3 << 6);
@@ -951,7 +950,6 @@ static int populate_par_region(struct kvm *kvm,
 	unsigned int vma_shift;
 	unsigned long offset;
 	unsigned long hva;
-	struct page *page;
 	kvm_pfn_t pfn;
 	int level;
 
@@ -1000,10 +998,8 @@ static int populate_par_region(struct kvm *kvm,
 					      RME_RTT_MAX_LEVEL, NULL);
 	}
 
-	page = pfn_to_page(pfn);
-
 	for (offset = 0; offset < map_size && !ret;
-	     offset += PAGE_SIZE, page++) {
+	     offset += PAGE_SIZE, pfn++) {
 		phys_addr_t page_ipa = ipa + offset;
 		kvm_pfn_t priv_pfn;
 		int order;
@@ -1015,8 +1011,8 @@ static int populate_par_region(struct kvm *kvm,
 			break;
 
 		ret = realm_create_protected_data_page(realm, page_ipa,
-						       pfn_to_page(priv_pfn),
-						       page, data_flags);
+						       priv_pfn,
+						       pfn, data_flags);
 	}
 
 	kvm_release_pfn_clean(pfn);