Message-ID: <20251202230303.1017519-13-skhawaja@google.com>
Date: Tue, 2 Dec 2025 23:02:42 +0000
From: Samiullah Khawaja <skhawaja@...gle.com>
To: David Woodhouse <dwmw2@...radead.org>, Lu Baolu <baolu.lu@...ux.intel.com>,
Joerg Roedel <joro@...tes.org>, Will Deacon <will@...nel.org>,
Pasha Tatashin <pasha.tatashin@...een.com>, Jason Gunthorpe <jgg@...pe.ca>, iommu@...ts.linux.dev
Cc: Samiullah Khawaja <skhawaja@...gle.com>, Robin Murphy <robin.murphy@....com>,
Pratyush Yadav <pratyush@...nel.org>, Kevin Tian <kevin.tian@...el.com>,
Alex Williamson <alex@...zbot.org>, linux-kernel@...r.kernel.org,
Saeed Mahameed <saeedm@...dia.com>, Adithya Jayachandran <ajayachandra@...dia.com>,
Parav Pandit <parav@...dia.com>, Leon Romanovsky <leonro@...dia.com>, William Tu <witu@...dia.com>,
Vipin Sharma <vipinsh@...gle.com>, dmatlack@...gle.com, YiFei Zhu <zhuyifei@...gle.com>,
Chris Li <chrisl@...nel.org>, praan@...gle.com
Subject: [RFC PATCH v2 12/32] iommupt: Implement preserve/unpreserve/restore callbacks
Implement the iommu domain ops for preservation, unpreservation and
restoration of iommu domains for live update. Use the existing page
walker to preserve the ioptdesc of the top_table and the lower tables.
Also preserve the top_level so it can be restored during boot.
Signed-off-by: Samiullah Khawaja <skhawaja@...gle.com>
---
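Note for reviewers (not part of the patch): a rough caller-side sketch of how
these ops might be exercised across a live update. It is illustrative only;
luo_store_domain_ser() and luo_load_domain_ser() are placeholder names for
whatever mechanism carries struct iommu_domain_ser to the next kernel, and it
assumes the .preserve/.restore domain op members introduced earlier in this
series:

static int example_preserve_domain(struct iommu_domain *domain,
				   struct iommu_domain_ser *ser)
{
	int ret;

	if (!domain->ops->preserve)
		return -EOPNOTSUPP;

	/* Walk the page table, marking every table page as preserved. */
	ret = domain->ops->preserve(domain, ser);
	if (ret)
		return ret;

	/*
	 * ser->top_table / ser->top_level now describe the preserved table;
	 * hand them to the next kernel (placeholder helper).
	 */
	return luo_store_domain_ser(ser);
}

static int example_restore_domain(struct iommu_domain *domain)
{
	struct iommu_domain_ser ser;
	int ret;

	ret = luo_load_domain_ser(&ser);	/* placeholder helper */
	if (ret)
		return ret;

	/* Swap the freshly allocated top table for the preserved one. */
	return domain->ops->restore(domain, &ser);
}
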
drivers/iommu/generic_pt/iommu_pt.h | 100 ++++++++++++++++++++++++++++
include/linux/generic_pt/iommu.h | 10 +++
2 files changed, 110 insertions(+)
diff --git a/drivers/iommu/generic_pt/iommu_pt.h b/drivers/iommu/generic_pt/iommu_pt.h
index 032d04ec7b56..f71b8c92372d 100644
--- a/drivers/iommu/generic_pt/iommu_pt.h
+++ b/drivers/iommu/generic_pt/iommu_pt.h
@@ -354,6 +354,7 @@ static int __collect_tables(struct pt_range *range, void *arg,
return ret;
continue;
}
+
if (pts.type == PT_ENTRY_OA && collect->check_mapped)
return -EADDRINUSE;
}
@@ -918,6 +919,105 @@ int DOMAIN_NS(map_pages)(struct iommu_domain *domain, unsigned long iova,
}
EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(map_pages), "GENERIC_PT_IOMMU");
+/**
+ * unpreserve() - Unpreserve page tables and other state of a domain.
+ * @domain: Domain to unpreserve
+ */
+void DOMAIN_NS(unpreserve)(struct iommu_domain *domain, struct iommu_domain_ser *ser)
+{
+ struct pt_iommu *iommu_table =
+ container_of(domain, struct pt_iommu, domain);
+ struct pt_common *common = common_from_iommu(iommu_table);
+ struct pt_range range = pt_all_range(common);
+ struct pt_iommu_collect_args collect = {
+ .free_list = IOMMU_PAGES_LIST_INIT(collect.free_list),
+ };
+
+ iommu_pages_list_add(&collect.free_list, range.top_table);
+ pt_walk_range(&range, __collect_tables, &collect);
+
+ iommu_unpreserve_pages(&collect.free_list, -1);
+}
+EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(unpreserve), "GENERIC_PT_IOMMU");
+
+/**
+ * preserve() - Preserve page tables and other state of a domain.
+ * @domain: Domain to preserve
+ *
+ * Returns: 0 on success, -ERRNO on failure.
+ */
+int DOMAIN_NS(preserve)(struct iommu_domain *domain, struct iommu_domain_ser *ser)
+{
+ struct pt_iommu *iommu_table =
+ container_of(domain, struct pt_iommu, domain);
+ struct pt_common *common = common_from_iommu(iommu_table);
+ struct pt_range range = pt_all_range(common);
+ struct pt_iommu_collect_args collect = {
+ .free_list = IOMMU_PAGES_LIST_INIT(collect.free_list),
+ };
+ int ret;
+
+ iommu_pages_list_add(&collect.free_list, range.top_table);
+ pt_walk_range(&range, __collect_tables, &collect);
+
+ ret = iommu_preserve_pages(&collect.free_list);
+ if (ret)
+ return ret;
+
+ ser->top_table = virt_to_phys(range.top_table);
+ ser->top_level = range.top_level;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(preserve), "GENERIC_PT_IOMMU");
+
+static int __restore_tables(struct pt_range *range, void *arg,
+ unsigned int level, struct pt_table_p *table)
+{
+ struct pt_state pts = pt_init(range, level, table);
+ int ret;
+
+ for_each_pt_level_entry(&pts) {
+ if (pts.type == PT_ENTRY_TABLE) {
+ iommu_restore_page(virt_to_phys(pts.table_lower));
+ ret = pt_descend(&pts, arg, __restore_tables);
+ if (ret)
+ return ret;
+ continue;
+ }
+ }
+ return 0;
+}
+
+/**
+ * restore() - Restore page tables and other state of a domain.
+ * @domain: Domain to restore
+ *
+ * Returns: 0 on success, -ERRNO on failure.
+ */
+int DOMAIN_NS(restore)(struct iommu_domain *domain, struct iommu_domain_ser *ser)
+{
+ struct pt_iommu *iommu_table =
+ container_of(domain, struct pt_iommu, domain);
+ struct pt_common *common = common_from_iommu(iommu_table);
+ struct pt_range range = pt_all_range(common);
+
+ iommu_restore_page(ser->top_table);
+
+	/* Free the top table allocated with the new domain */
+ iommu_free_pages(range.top_table);
+
+ /* Set the restored top table */
+ pt_top_set(common, phys_to_virt(ser->top_table), ser->top_level);
+
+	/* Restore all lower-level table pages */
+ range = pt_all_range(common);
+ pt_walk_range(&range, __restore_tables, NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(restore), "GENERIC_PT_IOMMU");
+
struct pt_unmap_args {
struct iommu_pages_list free_list;
pt_vaddr_t unmapped;
diff --git a/include/linux/generic_pt/iommu.h b/include/linux/generic_pt/iommu.h
index cfe05a77f86b..d67d1d8b509f 100644
--- a/include/linux/generic_pt/iommu.h
+++ b/include/linux/generic_pt/iommu.h
@@ -13,6 +13,7 @@ struct iommu_iotlb_gather;
struct pt_iommu_ops;
struct pt_iommu_driver_ops;
struct iommu_dirty_bitmap;
+struct iommu_domain_ser;
/**
* DOC: IOMMU Radix Page Table
@@ -198,6 +199,12 @@ struct pt_iommu_cfg {
unsigned long iova, phys_addr_t paddr, \
size_t pgsize, size_t pgcount, \
int prot, gfp_t gfp, size_t *mapped); \
+ int pt_iommu_##fmt##_preserve(struct iommu_domain *domain, \
+ struct iommu_domain_ser *ser); \
+ void pt_iommu_##fmt##_unpreserve(struct iommu_domain *domain, \
+ struct iommu_domain_ser *ser); \
+ int pt_iommu_##fmt##_restore(struct iommu_domain *domain, \
+ struct iommu_domain_ser *ser); \
size_t pt_iommu_##fmt##_unmap_pages( \
struct iommu_domain *domain, unsigned long iova, \
size_t pgsize, size_t pgcount, \
@@ -224,6 +231,9 @@ struct pt_iommu_cfg {
#define IOMMU_PT_DOMAIN_OPS(fmt) \
.iova_to_phys = &pt_iommu_##fmt##_iova_to_phys, \
.map_pages = &pt_iommu_##fmt##_map_pages, \
+ .preserve = &pt_iommu_##fmt##_preserve, \
+ .unpreserve = &pt_iommu_##fmt##_unpreserve, \
+ .restore = &pt_iommu_##fmt##_restore, \
.unmap_pages = &pt_iommu_##fmt##_unmap_pages
#define IOMMU_PT_DIRTY_OPS(fmt) \
.read_and_clear_dirty = &pt_iommu_##fmt##_read_and_clear_dirty
--
2.52.0.158.g65b55ccf14-goog