Message-Id: <20211013155831.943476-14-qperret@google.com>
Date: Wed, 13 Oct 2021 16:58:28 +0100
From: Quentin Perret <qperret@...gle.com>
To: Marc Zyngier <maz@...nel.org>, James Morse <james.morse@....com>,
Alexandru Elisei <alexandru.elisei@....com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>, Fuad Tabba <tabba@...gle.com>,
David Brazdil <dbrazdil@...gle.com>
Cc: linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.cs.columbia.edu,
linux-kernel@...r.kernel.org, kernel-team@...roid.com
Subject: [PATCH 13/16] KVM: arm64: Move double-sharing logic into hyp-specific function
From: Will Deacon <will@...nel.org>
Strictly speaking, double-sharing a page is an invalid transition and
should be rejected. However, we allow it in order to simplify the
book-keeping when KVM metadata (such as vcpu structures) co-exists in
the same page.

Given that double-sharing is only required for pages shared with the
hypervisor by the host, move the handling into a hyp-specific function
that checks incoming shares, thereby preventing double-sharing outside
of this particular transition.
Signed-off-by: Will Deacon <will@...nel.org>
Signed-off-by: Quentin Perret <qperret@...gle.com>
---
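For review context, below is a minimal, self-contained user-space sketch of
the dispatch this patch introduces. The types, the first-share fast path and
the error values are simplified stand-ins rather than the kernel's actual
definitions; only the shape of check_share()/hyp_check_incoming_share()
follows the hunks further down.

#include <errno.h>
#include <stdio.h>

/*
 * Simplified stand-ins for the pKVM page-tracking types; the real
 * definitions in arch/arm64/kvm/hyp/nvhe/mem_protect.c differ in detail.
 */
enum component_id { ID_HOST, ID_HYP };
enum page_state { PAGE_OWNED, PAGE_SHARED_OWNED, PAGE_SHARED_BORROWED };

struct share_req {
	enum component_id initiator_id;
	enum page_state initiator_state;
	unsigned long phys;
	unsigned long prot;
};

struct share_ack {
	enum page_state completer_state;
	unsigned long phys;
	unsigned long prot;
};

/*
 * Only the host may share a page with the hyp twice, and only when the
 * repeated request matches the existing shared mapping exactly.
 */
static int hyp_check_incoming_share(const struct share_req *req,
				    const struct share_ack *ack)
{
	if (req->initiator_id != ID_HOST)
		return -EPERM;
	if (req->initiator_state != PAGE_SHARED_OWNED)
		return -EPERM;
	if (ack->completer_state != PAGE_SHARED_BORROWED)
		return -EPERM;
	if (ack->phys != req->phys || ack->prot != req->prot)
		return -EPERM;
	return 0;
}

/*
 * check_share() now dispatches on the completer: only the hyp completer
 * tolerates an already-shared page; every other completer rejects it.
 */
static int check_share(const struct share_req *req,
		       const struct share_ack *ack,
		       enum component_id completer_id)
{
	/* First share of the page: nothing further to validate here. */
	if (req->initiator_state == PAGE_OWNED &&
	    ack->completer_state == PAGE_OWNED)
		return 0;

	switch (completer_id) {
	case ID_HYP:
		return hyp_check_incoming_share(req, ack);
	default:
		return -EPERM;
	}
}

int main(void)
{
	struct share_req req = { ID_HOST, PAGE_SHARED_OWNED, 0x1000, 0x3 };
	struct share_ack ack = { PAGE_SHARED_BORROWED, 0x1000, 0x3 };

	/* An exact re-share of an already-shared page is tolerated... */
	printf("host->hyp re-share: %d\n", check_share(&req, &ack, ID_HYP));

	/* ...but any mismatch with the existing mapping is rejected. */
	ack.prot = 0x1;
	printf("mismatched re-share: %d\n", check_share(&req, &ack, ID_HYP));
	return 0;
}

The effect of the dispatch is that the exact-match tolerance for a repeated
share lives only behind the hyp completer, so any other completer gets
strict single-share semantics by default.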
arch/arm64/kvm/hyp/nvhe/mem_protect.c | 57 +++++++++++++++++++--------
1 file changed, 41 insertions(+), 16 deletions(-)
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 909e60f71b06..3378117d010c 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -536,6 +536,33 @@ static int ack_share(struct pkvm_page_share_ack *ack,
}
}
+static int hyp_check_incoming_share(struct pkvm_page_req *req,
+ struct pkvm_page_share_ack *ack,
+ enum pkvm_component_id initiator,
+ enum kvm_pgtable_prot prot)
+{
+ /*
+ * We allow the host to share the same page twice, but that means we
+ * have to check that the states really do match exactly.
+ */
+ if (initiator != PKVM_ID_HOST)
+ return -EPERM;
+
+ if (req->initiator.state != PKVM_PAGE_SHARED_OWNED)
+ return -EPERM;
+
+ if (ack->completer.state != PKVM_PAGE_SHARED_BORROWED)
+ return -EPERM;
+
+ if (ack->completer.phys != req->phys)
+ return -EPERM;
+
+ if (ack->completer.prot != prot)
+ return -EPERM;
+
+ return 0;
+}
+
/*
* Check that the page states in the initiator and the completer are compatible
* for the requested page-sharing operation to go ahead.
@@ -544,6 +571,8 @@ static int check_share(struct pkvm_page_req *req,
struct pkvm_page_share_ack *ack,
struct pkvm_mem_share *share)
{
+ struct pkvm_mem_transition *tx = &share->tx;
+
if (!addr_is_memory(req->phys))
return -EINVAL;
@@ -552,25 +581,22 @@ static int check_share(struct pkvm_page_req *req,
return 0;
}
- if (req->initiator.state != PKVM_PAGE_SHARED_OWNED)
- return -EPERM;
-
- if (ack->completer.state != PKVM_PAGE_SHARED_BORROWED)
- return -EPERM;
-
- if (ack->completer.phys != req->phys)
- return -EPERM;
-
- if (ack->completer.prot != share->prot)
+ switch (tx->completer.id) {
+ case PKVM_ID_HYP:
+ return hyp_check_incoming_share(req, ack, tx->initiator.id,
+ share->prot);
+ default:
return -EPERM;
-
- return 0;
+ }
}
static int host_initiate_share(struct pkvm_page_req *req)
{
enum kvm_pgtable_prot prot;
+ if (req->initiator.state == PKVM_PAGE_SHARED_OWNED)
+ return 0;
+
prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
return host_stage2_idmap_locked(req->initiator.addr, PAGE_SIZE, prot);
}
@@ -595,6 +621,9 @@ static int hyp_complete_share(struct pkvm_page_req *req,
void *start = (void *)req->completer.addr, *end = start + PAGE_SIZE;
enum kvm_pgtable_prot prot;
+ if (req->initiator.state == PKVM_PAGE_SHARED_OWNED)
+ return 0;
+
prot = pkvm_mkstate(perms, PKVM_PAGE_SHARED_BORROWED);
return pkvm_create_mappings_locked(start, end, prot);
}
@@ -653,10 +682,6 @@ static int do_share(struct pkvm_mem_share *share)
if (ret)
break;
- /* Allow double-sharing by skipping over the page */
- if (req.initiator.state == PKVM_PAGE_SHARED_OWNED)
- continue;
-
ret = initiate_share(&req, share);
if (ret)
break;
--
2.33.0.882.g93a45727a2-goog