[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <yq5a34grsne2.fsf@kernel.org>
Date: Thu, 06 Feb 2025 10:03:25 +0530
From: Aneesh Kumar K.V <aneesh.kumar@...nel.org>
To: Gavin Shan <gshan@...hat.com>, Steven Price <steven.price@....com>,
kvm@...r.kernel.org, kvmarm@...ts.linux.dev
Cc: Catalin Marinas <catalin.marinas@....com>,
Marc Zyngier <maz@...nel.org>, Will Deacon <will@...nel.org>,
James Morse <james.morse@....com>,
Oliver Upton <oliver.upton@...ux.dev>,
Suzuki K Poulose <suzuki.poulose@....com>,
Zenghui Yu <yuzenghui@...wei.com>,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
Joey Gouly <joey.gouly@....com>,
Alexandru Elisei <alexandru.elisei@....com>,
Christoffer Dall <christoffer.dall@....com>,
Fuad Tabba <tabba@...gle.com>, linux-coco@...ts.linux.dev,
Ganapatrao Kulkarni <gankulkarni@...amperecomputing.com>,
Shanker Donthineni <sdonthineni@...dia.com>,
Alper Gun <alpergun@...gle.com>
Subject: Re: [PATCH v6 20/43] arm64: RME: Runtime faulting of memory
Gavin Shan <gshan@...hat.com> writes:
> On 12/13/24 1:55 AM, Steven Price wrote:
....
>> +static int private_memslot_fault(struct kvm_vcpu *vcpu,
>> + phys_addr_t fault_ipa,
>> + struct kvm_memory_slot *memslot)
>> +{
>> + struct kvm *kvm = vcpu->kvm;
>> + gpa_t gpa = kvm_gpa_from_fault(kvm, fault_ipa);
>> + gfn_t gfn = gpa >> PAGE_SHIFT;
>> + bool priv_exists = kvm_mem_is_private(kvm, gfn);
>> + struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
>> + struct page *page;
>> + kvm_pfn_t pfn;
>> + int ret;
>> + /*
>> + * For Realms, the shared address is an alias of the private GPA with
>> + * the top bit set. Thus if the fault address matches the GPA then it
>> + * is the private alias.
>> + */
>> + bool is_priv_gfn = (gpa == fault_ipa);
>> +
>
> We may rename 'priv_exists' to 'was_priv_gfn', which is consistent to 'is_priv_gfn'.
> Alternatively, we may use 'was_private' and 'is_private'.
>
>> + if (priv_exists != is_priv_gfn) {
>> + kvm_prepare_memory_fault_exit(vcpu,
>> + gpa,
>> + PAGE_SIZE,
>> + kvm_is_write_fault(vcpu),
>> + false, is_priv_gfn);
>> +
>> + return -EFAULT;
>> + }
>> +
>> + if (!is_priv_gfn) {
>> + /* Not a private mapping, handling normally */
>> + return -EINVAL;
>> + }
>> +
>> + ret = kvm_mmu_topup_memory_cache(memcache,
>> + kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu));
>> + if (ret)
>> + return ret;
>> +
>> + ret = kvm_gmem_get_pfn(kvm, memslot, gfn, &pfn, &page, NULL);
>> + if (ret)
>> + return ret;
>> +
>> + /* FIXME: Should be able to use bigger than PAGE_SIZE mappings */
>> + ret = realm_map_ipa(kvm, fault_ipa, pfn, PAGE_SIZE, KVM_PGTABLE_PROT_W,
>> + memcache);
>> + if (!ret)
>> + return 1; /* Handled */
>> +
>> + put_page(page);
>> + return ret;
>> +}
I also found the names confusing. Can we do something like the following?
modified arch/arm64/kvm/mmu.c
@@ -1487,7 +1487,7 @@ static int private_memslot_fault(struct kvm_vcpu *vcpu,
struct kvm *kvm = vcpu->kvm;
gpa_t gpa = kvm_gpa_from_fault(kvm, fault_ipa);
gfn_t gfn = gpa >> PAGE_SHIFT;
- bool priv_exists = kvm_mem_is_private(kvm, gfn);
+ bool is_priv_gfn = kvm_mem_is_private(kvm, gfn);
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
struct page *page;
kvm_pfn_t pfn;
@@ -1497,19 +1497,19 @@ static int private_memslot_fault(struct kvm_vcpu *vcpu,
* the top bit set. Thus if the fault address matches the GPA then it
* is the private alias.
*/
- bool is_priv_gfn = (gpa == fault_ipa);
+ bool is_priv_fault = (gpa == fault_ipa);
- if (priv_exists != is_priv_gfn) {
+ if (is_priv_gfn != is_priv_fault) {
kvm_prepare_memory_fault_exit(vcpu,
gpa,
PAGE_SIZE,
kvm_is_write_fault(vcpu),
- false, is_priv_gfn);
+ false, is_priv_fault);
return 0;
}
- if (!is_priv_gfn) {
+ if (!is_priv_fault) {
/* Not a private mapping, handling normally */
return -EINVAL;
}
Powered by blists - more mailing lists