[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20241212180423.1578358-15-smostafa@google.com>
Date: Thu, 12 Dec 2024 18:03:38 +0000
From: Mostafa Saleh <smostafa@...gle.com>
To: iommu@...ts.linux.dev, kvmarm@...ts.linux.dev,
linux-kernel@...r.kernel.org, linux-arm-kernel@...ts.infradead.org
Cc: catalin.marinas@....com, will@...nel.org, maz@...nel.org,
oliver.upton@...ux.dev, joey.gouly@....com, suzuki.poulose@....com,
yuzenghui@...wei.com, robdclark@...il.com, joro@...tes.org,
robin.murphy@....com, jean-philippe@...aro.org, jgg@...pe.ca,
nicolinc@...dia.com, vdonnefort@...gle.com, qperret@...gle.com,
tabba@...gle.com, danielmentz@...gle.com, tzukui@...gle.com,
Mostafa Saleh <smostafa@...gle.com>
Subject: [RFC PATCH v2 14/58] KVM: arm64: pkvm: Add IOMMU hypercalls
The unprivileged host IOMMU driver forwards some of the IOMMU API calls
to the hypervisor, which installs and populates the page tables.
Note that this is not a stable ABI. Those hypercalls change with the
kernel just like internal function calls.
One thing special about some of the IOMMU hypercalls is that they use
the newly added hyp_reqs_smccc_encode() to encode memory requests in
the HVC return, leveraging the X1, X2 and X3 registers as allowed by SMCCC.
Signed-off-by: Mostafa Saleh <smostafa@...gle.com>
Signed-off-by: Jean-Philippe Brucker <jean-philippe@...aro.org>
---
arch/arm64/include/asm/kvm_asm.h | 7 ++
arch/arm64/kvm/hyp/include/nvhe/iommu.h | 14 ++++
arch/arm64/kvm/hyp/nvhe/hyp-main.c | 89 +++++++++++++++++++++++++
arch/arm64/kvm/hyp/nvhe/iommu/iommu.c | 40 +++++++++++
4 files changed, 150 insertions(+)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index e4b391bdfdac..9ea155a04332 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -107,6 +107,13 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___pkvm_hyp_alloc_mgt_refill,
__KVM_HOST_SMCCC_FUNC___pkvm_hyp_alloc_mgt_reclaimable,
__KVM_HOST_SMCCC_FUNC___pkvm_hyp_alloc_mgt_reclaim,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_alloc_domain,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_free_domain,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_attach_dev,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_detach_dev,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_map_pages,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_unmap_pages,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_iova_to_phys,
/*
* Start of the dynamically registered hypercalls. Start a bit
diff --git a/arch/arm64/kvm/hyp/include/nvhe/iommu.h b/arch/arm64/kvm/hyp/include/nvhe/iommu.h
index 1ac70cc28a9e..908863f07b0b 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/iommu.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/iommu.h
@@ -4,6 +4,20 @@
#include <asm/kvm_host.h>
+/* Hypercall handlers */
+int kvm_iommu_alloc_domain(pkvm_handle_t domain_id, int type);
+int kvm_iommu_free_domain(pkvm_handle_t domain_id);
+int kvm_iommu_attach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
+ u32 endpoint_id, u32 pasid, u32 pasid_bits);
+int kvm_iommu_detach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
+ u32 endpoint_id, u32 pasid);
+size_t kvm_iommu_map_pages(pkvm_handle_t domain_id,
+ unsigned long iova, phys_addr_t paddr, size_t pgsize,
+ size_t pgcount, int prot);
+size_t kvm_iommu_unmap_pages(pkvm_handle_t domain_id, unsigned long iova,
+ size_t pgsize, size_t pgcount);
+phys_addr_t kvm_iommu_iova_to_phys(pkvm_handle_t domain_id, unsigned long iova);
+
struct kvm_iommu_ops {
int (*init)(void);
};
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 08c0ff823a55..9b224842c487 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -19,6 +19,7 @@
#include <nvhe/alloc.h>
#include <nvhe/alloc_mgt.h>
#include <nvhe/ffa.h>
+#include <nvhe/iommu.h>
#include <nvhe/mem_protect.h>
#include <nvhe/modules.h>
#include <nvhe/mm.h>
@@ -1592,6 +1593,87 @@ static void handle___pkvm_hyp_alloc_mgt_reclaim(struct kvm_cpu_context *host_ctx
cpu_reg(host_ctxt, 2) = mc.nr_pages;
}
+static void handle___pkvm_host_iommu_alloc_domain(struct kvm_cpu_context *host_ctxt)
+{
+ int ret;
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 1);
+ DECLARE_REG(int, type, host_ctxt, 2);
+
+ ret = kvm_iommu_alloc_domain(domain, type);
+ hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
+}
+
+static void handle___pkvm_host_iommu_free_domain(struct kvm_cpu_context *host_ctxt)
+{
+ int ret;
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 1);
+
+ ret = kvm_iommu_free_domain(domain);
+ hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
+}
+
+static void handle___pkvm_host_iommu_attach_dev(struct kvm_cpu_context *host_ctxt)
+{
+ int ret;
+ DECLARE_REG(pkvm_handle_t, iommu, host_ctxt, 1);
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 2);
+ DECLARE_REG(unsigned int, endpoint, host_ctxt, 3);
+ DECLARE_REG(unsigned int, pasid, host_ctxt, 4);
+ DECLARE_REG(unsigned int, pasid_bits, host_ctxt, 5);
+
+ ret = kvm_iommu_attach_dev(iommu, domain, endpoint,
+ pasid, pasid_bits);
+ hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
+}
+
+static void handle___pkvm_host_iommu_detach_dev(struct kvm_cpu_context *host_ctxt)
+{
+ int ret;
+ DECLARE_REG(pkvm_handle_t, iommu, host_ctxt, 1);
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 2);
+ DECLARE_REG(unsigned int, endpoint, host_ctxt, 3);
+ DECLARE_REG(unsigned int, pasid, host_ctxt, 4);
+
+ ret = kvm_iommu_detach_dev(iommu, domain, endpoint, pasid);
+ hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
+}
+
+static void handle___pkvm_host_iommu_map_pages(struct kvm_cpu_context *host_ctxt)
+{
+ unsigned long ret;
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 1);
+ DECLARE_REG(unsigned long, iova, host_ctxt, 2);
+ DECLARE_REG(phys_addr_t, paddr, host_ctxt, 3);
+ DECLARE_REG(size_t, pgsize, host_ctxt, 4);
+ DECLARE_REG(size_t, pgcount, host_ctxt, 5);
+ DECLARE_REG(unsigned int, prot, host_ctxt, 6);
+
+ ret = kvm_iommu_map_pages(domain, iova, paddr,
+ pgsize, pgcount, prot);
+ hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
+}
+
+static void handle___pkvm_host_iommu_unmap_pages(struct kvm_cpu_context *host_ctxt)
+{
+ unsigned long ret;
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 1);
+ DECLARE_REG(unsigned long, iova, host_ctxt, 2);
+ DECLARE_REG(size_t, pgsize, host_ctxt, 3);
+ DECLARE_REG(size_t, pgcount, host_ctxt, 4);
+
+ ret = kvm_iommu_unmap_pages(domain, iova,
+ pgsize, pgcount);
+ hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
+}
+
+static void handle___pkvm_host_iommu_iova_to_phys(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 1);
+ DECLARE_REG(unsigned long, iova, host_ctxt, 2);
+
+ cpu_reg(host_ctxt, 1) = kvm_iommu_iova_to_phys(domain, iova);
+}
+
typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -1649,6 +1731,13 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_hyp_alloc_mgt_refill),
HANDLE_FUNC(__pkvm_hyp_alloc_mgt_reclaimable),
HANDLE_FUNC(__pkvm_hyp_alloc_mgt_reclaim),
+ HANDLE_FUNC(__pkvm_host_iommu_alloc_domain),
+ HANDLE_FUNC(__pkvm_host_iommu_free_domain),
+ HANDLE_FUNC(__pkvm_host_iommu_attach_dev),
+ HANDLE_FUNC(__pkvm_host_iommu_detach_dev),
+ HANDLE_FUNC(__pkvm_host_iommu_map_pages),
+ HANDLE_FUNC(__pkvm_host_iommu_unmap_pages),
+ HANDLE_FUNC(__pkvm_host_iommu_iova_to_phys),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
diff --git a/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c b/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
index 3bd87d2084e9..9022fd612a49 100644
--- a/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
+++ b/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
@@ -16,3 +16,43 @@ int kvm_iommu_init(void)
return kvm_iommu_ops->init();
}
+
+int kvm_iommu_alloc_domain(pkvm_handle_t domain_id, int type)
+{
+ return -ENODEV;
+}
+
+int kvm_iommu_free_domain(pkvm_handle_t domain_id)
+{
+ return -ENODEV;
+}
+
+int kvm_iommu_attach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
+ u32 endpoint_id, u32 pasid, u32 pasid_bits)
+{
+ return -ENODEV;
+}
+
+int kvm_iommu_detach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
+ u32 endpoint_id, u32 pasid)
+{
+ return -ENODEV;
+}
+
+size_t kvm_iommu_map_pages(pkvm_handle_t domain_id,
+ unsigned long iova, phys_addr_t paddr, size_t pgsize,
+ size_t pgcount, int prot)
+{
+ return 0;
+}
+
+size_t kvm_iommu_unmap_pages(pkvm_handle_t domain_id, unsigned long iova,
+ size_t pgsize, size_t pgcount)
+{
+ return 0;
+}
+
+phys_addr_t kvm_iommu_iova_to_phys(pkvm_handle_t domain_id, unsigned long iova)
+{
+ return 0;
+}
--
2.47.0.338.g60cca15819-goog
Powered by blists - more mailing lists