Message-ID:
<SA1PR12MB7199835E63E1EF48C7C7638DB07DA@SA1PR12MB7199.namprd12.prod.outlook.com>
Date: Thu, 19 Jun 2025 12:14:38 +0000
From: Ankit Agrawal <ankita@...dia.com>
To: Jason Gunthorpe <jgg@...dia.com>, Catalin Marinas
<catalin.marinas@....com>
CC: "maz@...nel.org" <maz@...nel.org>, "oliver.upton@...ux.dev"
<oliver.upton@...ux.dev>, "joey.gouly@....com" <joey.gouly@....com>,
"suzuki.poulose@....com" <suzuki.poulose@....com>, "yuzenghui@...wei.com"
<yuzenghui@...wei.com>, "will@...nel.org" <will@...nel.org>,
"ryan.roberts@....com" <ryan.roberts@....com>, "shahuang@...hat.com"
<shahuang@...hat.com>, "lpieralisi@...nel.org" <lpieralisi@...nel.org>,
"david@...hat.com" <david@...hat.com>, "ddutile@...hat.com"
<ddutile@...hat.com>, "seanjc@...gle.com" <seanjc@...gle.com>, Aniket Agashe
<aniketa@...dia.com>, Neo Jia <cjia@...dia.com>, Kirti Wankhede
<kwankhede@...dia.com>, Krishnakant Jaju <kjaju@...dia.com>, "Tarun Gupta
(SW-GPU)" <targupta@...dia.com>, Vikram Sethi <vsethi@...dia.com>, Andy
Currid <acurrid@...dia.com>, Alistair Popple <apopple@...dia.com>, John
Hubbard <jhubbard@...dia.com>, Dan Williams <danw@...dia.com>, Zhi Wang
<zhiw@...dia.com>, Matt Ochs <mochs@...dia.com>, Uday Dhoke
<udhoke@...dia.com>, Dheeraj Nigam <dnigam@...dia.com>,
"alex.williamson@...hat.com" <alex.williamson@...hat.com>,
"sebastianene@...gle.com" <sebastianene@...gle.com>, "coltonlewis@...gle.com"
<coltonlewis@...gle.com>, "kevin.tian@...el.com" <kevin.tian@...el.com>,
"yi.l.liu@...el.com" <yi.l.liu@...el.com>, "ardb@...nel.org"
<ardb@...nel.org>, "akpm@...ux-foundation.org" <akpm@...ux-foundation.org>,
"gshan@...hat.com" <gshan@...hat.com>, "linux-mm@...ck.org"
<linux-mm@...ck.org>, "tabba@...gle.com" <tabba@...gle.com>,
"qperret@...gle.com" <qperret@...gle.com>, "kvmarm@...ts.linux.dev"
<kvmarm@...ts.linux.dev>, "linux-kernel@...r.kernel.org"
<linux-kernel@...r.kernel.org>, "linux-arm-kernel@...ts.infradead.org"
<linux-arm-kernel@...ts.infradead.org>, "maobibo@...ngson.cn"
<maobibo@...ngson.cn>
Subject: Re: [PATCH v7 4/5] KVM: arm64: Allow cacheable stage 2 mapping using
VMA flags
>> > - disable_cmo = true;
>> > + if (!is_vma_cacheable)
>> > + disable_cmo = true;
>>
>> I'm tempted to stick to the 'device' variable name. Or something like
>> s2_noncacheable. As I commented, it's not just about disabling CMOs.
>
> I think it would be clearer to have two concepts/variables then, because
> the cases where it is really about preventing cacheable access to
> prevent aborts are not linked to the logic that checks pfn_valid(). We
> have to detect those cases separately (through the VMA flags, was it?).
>
> Having these two things together is IMHO confusing.
>
> Jason
Thanks Catalin and Jason for the comments.

Considering the feedback, I think we may do the following here:
1. Rename the device variable to s2_noncacheable to represent whether the
S2 is going to be marked non-cacheable. Otherwise the S2 will be mapped
NORMAL.
2. Detect which PFNs have to be marked s2_noncacheable. If a PFN is not in
the kernel map, mark the S2 non-cacheable, except for PFNMAP with a
cacheable VMA.
3. Prohibit cacheable PFNMAP if the hardware doesn't support FWB and
CACHE DIC (a sketch of this helper check follows the list).
4. Prohibit a non-cacheable S2 mapping for a cacheable VMA in all cases,
whether on pre-FWB hardware or not.
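
Regarding (3), here is a minimal sketch of what the
kvm_arch_supports_cacheable_pfnmap() helper could look like, assuming it
simply gates on the FWB and CACHE DIC CPU capabilities (the exact
capability checks are my assumption, not final code):

static inline bool kvm_arch_supports_cacheable_pfnmap(void)
{
        /*
         * Assumption: a cacheable S2 PFNMAP needs FWB (so KVM never has
         * to perform CMOs on the PFNMAP range through the kernel map)
         * and CTR_EL0.DIC (so no icache invalidation through the kernel
         * map is needed on execution either).
         */
        return cpus_have_final_cap(ARM64_HAS_STAGE2_FWB) &&
               cpus_have_final_cap(ARM64_HAS_CACHE_DIC);
}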
This is how the patch would look:
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 339194441a25..979668d475bd 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1516,8 +1516,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 {
         int ret = 0;
         bool write_fault, writable, force_pte = false;
-        bool exec_fault, mte_allowed, is_vma_cacheable;
-        bool device = false, vfio_allow_any_uc = false;
+        bool exec_fault, mte_allowed, is_vma_cacheable, cacheable_pfnmap = false;
+        bool s2_noncacheable = false, vfio_allow_any_uc = false;
         unsigned long mmu_seq;
         phys_addr_t ipa = fault_ipa;
         struct kvm *kvm = vcpu->kvm;
@@ -1660,6 +1660,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         is_vma_cacheable = kvm_vma_is_cacheable(vma);
 
+        if (vma->vm_flags & VM_PFNMAP) {
+                /* Reject COW VM_PFNMAP */
+                if (is_cow_mapping(vma->vm_flags))
+                        return -EINVAL;
+
+                if (is_vma_cacheable)
+                        cacheable_pfnmap = true;
+        }
+
         /* Don't use the VMA after the unlock -- it may have vanished */
         vma = NULL;
@@ -1684,8 +1693,16 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                 return -EFAULT;
 
         if (kvm_is_device_pfn(pfn)) {
-                if (is_vma_cacheable)
-                        return -EINVAL;
+                /*
+                 * When FWB is unsupported, KVM needs to do cache flushes
+                 * (via dcache_clean_inval_poc()) of the underlying memory,
+                 * which is only possible if that memory is present in the
+                 * kernel map.
+                 *
+                 * Outright reject the fault, as cacheable device memory is
+                 * not present in the kernel map and is therefore not
+                 * suitable for cache maintenance.
+                 */
+                if (cacheable_pfnmap && !kvm_arch_supports_cacheable_pfnmap())
+                        return -EFAULT;
 
                 /*
                  * If the page was identified as device early by looking at
@@ -1696,8 +1713,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                  *
                  * In both cases, we don't let transparent_hugepage_adjust()
                  * change things at the last minute.
+                 *
+                 * Allow the S2 to be mapped cacheable for PFNMAP device
+                 * memory marked as cacheable in the VMA. Note that such a
+                 * mapping is safe, as the KVM S2 will have the same Normal
+                 * memory type as the VMA has in the S1.
                  */
-                device = true;
+                if (!cacheable_pfnmap)
+                        s2_noncacheable = true;
         } else if (logging_active && !write_fault) {
                 /*
                  * Only actually map the page as writable if this was a write
@@ -1706,7 +1729,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                 writable = false;
         }
 
-        if (exec_fault && device)
+        /*
+         * Prohibit mapping a region non-cacheable in the S2 while it is
+         * marked cacheable in the userspace VMA. Such a mismatched mapping
+         * is a security risk.
+         */
+        if (is_vma_cacheable && s2_noncacheable)
+                return -EINVAL;
+
+        if (exec_fault && s2_noncacheable)
                 return -ENOEXEC;
 
         /*
@@ -1739,7 +1770,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
          * If we are not forced to use page mapping, check if we are
          * backed by a THP and thus use block mapping if possible.
          */
-        if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
+        if (vma_pagesize == PAGE_SIZE && !(force_pte || s2_noncacheable)) {
                 if (fault_is_perm && fault_granule > PAGE_SIZE)
                         vma_pagesize = fault_granule;
                 else
@@ -1753,7 +1784,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                 }
         }
 
-        if (!fault_is_perm && !device && kvm_has_mte(kvm)) {
+        if (!fault_is_perm && !s2_noncacheable && kvm_has_mte(kvm)) {
                 /* Check the VMM hasn't introduced a new disallowed VMA */
                 if (mte_allowed) {
                         sanitise_mte_tags(kvm, pfn, vma_pagesize);
@@ -1769,7 +1800,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         if (exec_fault)
                 prot |= KVM_PGTABLE_PROT_X;
 
-        if (device) {
+        if (s2_noncacheable) {
                 if (vfio_allow_any_uc)
                         prot |= KVM_PGTABLE_PROT_NORMAL_NC;
                 else
@@ -2266,8 +2297,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                         break;
                 }
 
-                /* Cacheable PFNMAP is not allowed */
-                if (kvm_vma_is_cacheable(vma)) {
+                /*
+                 * Cacheable PFNMAP is allowed only if the hardware
+                 * supports it.
+                 */
+                if (kvm_vma_is_cacheable(vma) &&
+                    !kvm_arch_supports_cacheable_pfnmap()) {
                         ret = -EINVAL;
                         break;
                 }
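
For completeness, kvm_vma_is_cacheable() above is assumed to classify the
VMA by the MT_* attribute index encoded in vm_page_prot, roughly along
these lines (a sketch, not necessarily the exact implementation):

static bool kvm_vma_is_cacheable(struct vm_area_struct *vma)
{
        /*
         * Sketch: treat any S1 attribute index other than the Device and
         * Normal-NC memory types as cacheable Normal memory.
         */
        switch (FIELD_GET(PTE_ATTRINDX_MASK, pgprot_val(vma->vm_page_prot))) {
        case MT_NORMAL_NC:
        case MT_DEVICE_nGnRE:
        case MT_DEVICE_nGnRnE:
                return false;
        default:
                return true;
        }
}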