[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250819215156.2494305-18-smostafa@google.com>
Date: Tue, 19 Aug 2025 21:51:45 +0000
From: Mostafa Saleh <smostafa@...gle.com>
To: linux-kernel@...r.kernel.org, kvmarm@...ts.linux.dev,
linux-arm-kernel@...ts.infradead.org, iommu@...ts.linux.dev
Cc: maz@...nel.org, oliver.upton@...ux.dev, joey.gouly@....com,
suzuki.poulose@....com, yuzenghui@...wei.com, catalin.marinas@....com,
will@...nel.org, robin.murphy@....com, jean-philippe@...aro.org,
qperret@...gle.com, tabba@...gle.com, jgg@...pe.ca, mark.rutland@....com,
praan@...gle.com, Mostafa Saleh <smostafa@...gle.com>
Subject: [PATCH v4 17/28] iommu/arm-smmu-v3-kvm: Take over SMMUs
Donate the array of SMMU descriptions to the hypervisor, as it
must not be modifiable by the host after de-privilege.
Also, donate the SMMU MMIO resources to the hypervisor.
Signed-off-by: Mostafa Saleh <smostafa@...gle.com>
---
.../iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c | 81 ++++++++++++++++++-
1 file changed, 80 insertions(+), 1 deletion(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c
index fa8b71152560..b56feae81dda 100644
--- a/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/pkvm/arm-smmu-v3.c
@@ -7,15 +7,94 @@
#include <asm/kvm_hyp.h>
#include <nvhe/iommu.h>
+#include <nvhe/mem_protect.h>
#include "arm_smmu_v3.h"
size_t __ro_after_init kvm_hyp_arm_smmu_v3_count;
struct hyp_arm_smmu_v3_device *kvm_hyp_arm_smmu_v3_smmus;
+/* Iterate over all entries of the hypervisor-owned SMMU descriptor array. */
+#define for_each_smmu(smmu) \
+ for ((smmu) = kvm_hyp_arm_smmu_v3_smmus; \
+ (smmu) != &kvm_hyp_arm_smmu_v3_smmus[kvm_hyp_arm_smmu_v3_count]; \
+ (smmu)++)
+
+/*
+ * Transfer ownership of a physically contiguous memory range from the
+ * host to the hypervisor. @phys and @size must be page aligned (only
+ * warned on, not rejected). Returns 0 on success or a negative error
+ * from __pkvm_host_donate_hyp().
+ */
+static int smmu_take_pages(u64 phys, size_t size)
+{
+ WARN_ON(!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size));
+ return __pkvm_host_donate_hyp(phys >> PAGE_SHIFT, size >> PAGE_SHIFT);
+}
+
+/*
+ * Give pages previously taken with smmu_take_pages() back to the host.
+ * A failed donation back to the host is unexpected here, so it is only
+ * warned about rather than propagated.
+ */
+static void smmu_reclaim_pages(u64 phys, size_t size)
+{
+ WARN_ON(!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size));
+ WARN_ON(__pkvm_hyp_donate_host(phys >> PAGE_SHIFT, size >> PAGE_SHIFT));
+}
+
+/*
+ * Put the device in a state that can be probed by the host driver:
+ * donate every MMIO page of the SMMU register frame back to the host.
+ * Failures are unexpected and only warned about.
+ */
+static void smmu_deinit_device(struct hyp_arm_smmu_v3_device *smmu)
+{
+ int i;
+ size_t nr_pages = smmu->mmio_size >> PAGE_SHIFT;
+
+ for (i = 0 ; i < nr_pages ; ++i) {
+ u64 pfn = (smmu->mmio_addr >> PAGE_SHIFT) + i;
+
+ WARN_ON(__pkvm_hyp_donate_host_mmio(pfn));
+ }
+}
+
+/*
+ * Claim an SMMU's MMIO register frame for exclusive hypervisor use by
+ * donating each of its pages from the host, and record the hypervisor
+ * virtual address of the frame in smmu->base.
+ *
+ * Returns 0 on success, or -EINVAL if the MMIO region described by the
+ * host is not page aligned.
+ */
+static int smmu_init_device(struct hyp_arm_smmu_v3_device *smmu)
+{
+ int i;
+ size_t nr_pages;
+
+ /* Both the base and the size must be page granular. */
+ if (!PAGE_ALIGNED(smmu->mmio_addr | smmu->mmio_size))
+ return -EINVAL;
+
+ nr_pages = smmu->mmio_size >> PAGE_SHIFT;
+ for (i = 0 ; i < nr_pages ; ++i) {
+ u64 pfn = (smmu->mmio_addr >> PAGE_SHIFT) + i;
+
+ /*
+ * This should never happen, so it's fine to be strict to avoid
+ * complicated error handling.
+ */
+ WARN_ON(__pkvm_host_donate_hyp_mmio(pfn));
+ }
+ /*
+ * NOTE(review): assumes the donated MMIO range is accessible through
+ * the hyp phys-to-virt alias — confirm against __pkvm_host_donate_hyp_mmio.
+ */
+ smmu->base = hyp_phys_to_virt(smmu->mmio_addr);
+
+ return 0;
+}
+
+/*
+ * Take over all SMMUs described by the host: donate the descriptor
+ * array so the host can no longer modify it, then claim each device's
+ * MMIO resources. On any device failure, unwind the devices already
+ * initialized and return the descriptor array to the host.
+ */
static int smmu_init(void)
{
- return -ENOSYS;
+ int ret;
+ struct hyp_arm_smmu_v3_device *smmu;
+ size_t smmu_arr_size = PAGE_ALIGN(sizeof(*kvm_hyp_arm_smmu_v3_smmus) *
+ kvm_hyp_arm_smmu_v3_count);
+
+ /* The array was set up by the host; switch to its hypervisor alias. */
+ kvm_hyp_arm_smmu_v3_smmus = kern_hyp_va(kvm_hyp_arm_smmu_v3_smmus);
+ ret = smmu_take_pages(hyp_virt_to_phys(kvm_hyp_arm_smmu_v3_smmus),
+ smmu_arr_size);
+ if (ret)
+ return ret;
+
+ for_each_smmu(smmu) {
+ ret = smmu_init_device(smmu);
+ if (ret)
+ goto out_reclaim_smmu;
+ }
+
+ return 0;
+
+out_reclaim_smmu:
+ /* Deinit only the devices initialized before the failing one. */
+ while (smmu != kvm_hyp_arm_smmu_v3_smmus)
+ smmu_deinit_device(--smmu);
+ smmu_reclaim_pages(hyp_virt_to_phys(kvm_hyp_arm_smmu_v3_smmus),
+ smmu_arr_size);
+ return ret;
}
static void smmu_host_stage2_idmap(phys_addr_t start, phys_addr_t end, int prot)
--
2.51.0.rc1.167.g924127e9c0-goog
Powered by blists - more mailing lists