Message-ID: <20250919155056.2648137-1-vdonnefort@google.com>
Date: Fri, 19 Sep 2025 16:50:56 +0100
From: Vincent Donnefort <vdonnefort@...gle.com>
To: maz@...nel.org, oliver.upton@...ux.dev, joey.gouly@....com,
suzuki.poulose@....com, yuzenghui@...wei.com, catalin.marinas@....com,
will@...nel.org
Cc: qperret@...gle.com, sebastianene@...gle.com, keirf@...gle.com,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev,
linux-kernel@...r.kernel.org, kernel-team@...roid.com,
Vincent Donnefort <vdonnefort@...gle.com>
Subject: [PATCH v2] KVM: arm64: Check range args for pKVM mem transitions
There's currently no verification of host-issued ranges in most of the
pKVM memory transitions. The end boundary computed from them can
therefore overflow and evade the subsequent checks.

Close this loophole with an additional check_range_args() check on a
per-public-function basis.

The host_unshare_guest transition is already protected via
__check_host_shared_guest(), while assert_host_shared_guest() callers
already skip the host checks.
Signed-off-by: Vincent Donnefort <vdonnefort@...gle.com>
---
v1 -> v2:
- Also check for (nr_pages * PAGE_SIZE) overflow. (Quentin)
- Rename to check_range_args().
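
For reviewers, a minimal userspace sketch (illustrative only, not part of
the patch) of the wrap-around the new check guards against, assuming a 4K
page size and a host-issued, oversized nr_pages:

  #include <stdint.h>
  #include <stdio.h>

  #define EXAMPLE_PAGE_SIZE 4096ULL	/* stand-in for PAGE_SIZE */

  int main(void)
  {
  	uint64_t phys = 0x80000000ULL;			/* arbitrary example base */
  	uint64_t nr_pages = 1ULL << 52;			/* oversized, host-issued */
  	uint64_t size = EXAMPLE_PAGE_SIZE * nr_pages;	/* 2^12 * 2^52 wraps to 0 */
  	uint64_t end = phys + size;			/* end == phys */

  	/* A later end-boundary check now sees a zero-sized range and passes. */
  	printf("size=%llu end=0x%llx\n",
  	       (unsigned long long)size, (unsigned long long)end);
  	return 0;
  }

check_mul_overflow() catches the multiplication wrapping, and the
start < (start + *size) test additionally rejects ranges whose end wraps
past 2^64 (and, as a side effect, zero-length ranges).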
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 8957734d6183..65fcd2148f59 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -712,6 +712,14 @@ static int __guest_check_page_state_range(struct pkvm_hyp_vm *vm, u64 addr,
return check_page_state_range(&vm->pgt, addr, size, &d);
}
+static bool check_range_args(u64 start, u64 nr_pages, u64 *size)
+{
+ if (check_mul_overflow(nr_pages, PAGE_SIZE, size))
+ return false;
+
+ return start < (start + *size);
+}
+
int __pkvm_host_share_hyp(u64 pfn)
{
u64 phys = hyp_pfn_to_phys(pfn);
@@ -772,10 +780,13 @@ int __pkvm_host_unshare_hyp(u64 pfn)
int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
{
u64 phys = hyp_pfn_to_phys(pfn);
- u64 size = PAGE_SIZE * nr_pages;
void *virt = __hyp_va(phys);
+ u64 size;
int ret;
+ if (!check_range_args(phys, nr_pages, &size))
+ return -EINVAL;
+
host_lock_component();
hyp_lock_component();
@@ -800,10 +811,13 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
{
u64 phys = hyp_pfn_to_phys(pfn);
- u64 size = PAGE_SIZE * nr_pages;
u64 virt = (u64)__hyp_va(phys);
+ u64 size;
int ret;
+ if (!check_range_args(phys, nr_pages, &size))
+ return -EINVAL;
+
host_lock_component();
hyp_lock_component();
@@ -884,9 +898,12 @@ void hyp_unpin_shared_mem(void *from, void *to)
int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
{
u64 phys = hyp_pfn_to_phys(pfn);
- u64 size = PAGE_SIZE * nr_pages;
+ u64 size;
int ret;
+ if (!check_range_args(phys, nr_pages, &size))
+ return -EINVAL;
+
host_lock_component();
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
if (!ret)
@@ -899,9 +916,12 @@ int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
{
u64 phys = hyp_pfn_to_phys(pfn);
- u64 size = PAGE_SIZE * nr_pages;
+ u64 size;
int ret;
+ if (!check_range_args(phys, nr_pages, &size))
+ return -EINVAL;
+
host_lock_component();
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
if (!ret)
@@ -945,6 +965,9 @@ int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu
if (prot & ~KVM_PGTABLE_PROT_RWX)
return -EINVAL;
+ if (!check_range_args(phys, nr_pages, &size))
+ return -EINVAL;
+
ret = __guest_check_transition_size(phys, ipa, nr_pages, &size);
if (ret)
return ret;
base-commit: 8b789f2b7602a818e7c7488c74414fae21392b63
--
2.51.0.470.ga7dc726c21-goog