Message-ID: <20251011193847.1836454-6-michal.winiarski@intel.com>
Date: Sat, 11 Oct 2025 21:38:26 +0200
From: Michał Winiarski <michal.winiarski@...el.com>
To: Alex Williamson <alex.williamson@...hat.com>, Lucas De Marchi
	<lucas.demarchi@...el.com>, Thomas Hellström
	<thomas.hellstrom@...ux.intel.com>, Rodrigo Vivi <rodrigo.vivi@...el.com>,
	Jason Gunthorpe <jgg@...pe.ca>, Yishai Hadas <yishaih@...dia.com>, Kevin Tian
	<kevin.tian@...el.com>, Shameer Kolothum
	<shameerali.kolothum.thodi@...wei.com>, <intel-xe@...ts.freedesktop.org>,
	<linux-kernel@...r.kernel.org>, <kvm@...r.kernel.org>
CC: <dri-devel@...ts.freedesktop.org>, Matthew Brost
	<matthew.brost@...el.com>, Michal Wajdeczko <michal.wajdeczko@...el.com>,
	Jani Nikula <jani.nikula@...ux.intel.com>, Joonas Lahtinen
	<joonas.lahtinen@...ux.intel.com>, Tvrtko Ursulin <tursulin@...ulin.net>,
	David Airlie <airlied@...il.com>, Simona Vetter <simona@...ll.ch>, "Lukasz
 Laguna" <lukasz.laguna@...el.com>, Michał Winiarski
	<michal.winiarski@...el.com>
Subject: [PATCH 05/26] drm/xe/pf: Add data structures and handlers for migration rings

Migration data is queued in a per-GT ptr_ring to decouple the worker
responsible for handling the data transfer from the .read()/.write()
syscalls.
Add the data structures and handlers that will be used in future
commits.

Signed-off-by: Michał Winiarski <michal.winiarski@...el.com>
---
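Note for reviewers (kept out of the commit message): the handlers added here
follow the stock ptr_ring + waitqueue handshake. For anyone unfamiliar with
ptr_ring, below is a minimal, self-contained sketch of the same pattern. All
names in it (demo_ring, demo_produce, demo_consume) are hypothetical, and it
leaves out the timeout and VF data-WIP checks that the real handlers perform:

#include <linux/err.h>
#include <linux/ptr_ring.h>
#include <linux/wait.h>

struct demo_ring {
	struct ptr_ring ring;	/* fixed-size FIFO of packet pointers */
	wait_queue_head_t wq;	/* shared by producer and consumer */
};

/* Queue a packet, sleeping while the ring is full. */
static int demo_produce(struct demo_ring *r, void *pkt)
{
	while (ptr_ring_produce(&r->ring, pkt)) {
		int ret = wait_event_interruptible(r->wq, !ptr_ring_full(&r->ring));

		if (ret)
			return ret;	/* -ERESTARTSYS on signal */
	}
	wake_up_all(&r->wq);	/* notify the consumer that data is available */
	return 0;
}

/* Dequeue a packet, sleeping while the ring is empty. */
static void *demo_consume(struct demo_ring *r)
{
	void *pkt;

	while (!(pkt = ptr_ring_consume(&r->ring))) {
		int ret = wait_event_interruptible(r->wq, !ptr_ring_empty(&r->ring));

		if (ret)
			return ERR_PTR(ret);
	}
	wake_up_all(&r->wq);	/* notify the producer that a slot is free */
	return pkt;
}

The real handlers additionally bound each sleep with
wait_event_interruptible_timeout() and bail out once the VF leaves the
data-WIP state, so neither side can block forever.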
 drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c   |   4 +
 drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 163 ++++++++++++++++++
 drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h |   9 +
 .../drm/xe/xe_gt_sriov_pf_migration_types.h   |   5 +-
 drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h     |   3 +
 drivers/gpu/drm/xe/xe_sriov_pf_migration.c    | 147 ++++++++++++++++
 drivers/gpu/drm/xe/xe_sriov_pf_migration.h    |  20 +++
 .../gpu/drm/xe/xe_sriov_pf_migration_types.h  |  37 ++++
 drivers/gpu/drm/xe/xe_sriov_pf_types.h        |   3 +
 9 files changed, 390 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
index 44df984278548..16a88e7599f6d 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
@@ -19,6 +19,7 @@
 #include "xe_guc_ct.h"
 #include "xe_sriov.h"
 #include "xe_sriov_pf_control.h"
+#include "xe_sriov_pf_migration.h"
 #include "xe_sriov_pf_service.h"
 #include "xe_tile.h"
 
@@ -388,6 +389,8 @@ static bool pf_enter_vf_wip(struct xe_gt *gt, unsigned int vfid)
 
 static void pf_exit_vf_wip(struct xe_gt *gt, unsigned int vfid)
 {
+	struct wait_queue_head *wq = xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid);
+
 	if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_WIP)) {
 		struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid);
 
@@ -399,6 +402,7 @@ static void pf_exit_vf_wip(struct xe_gt *gt, unsigned int vfid)
 		pf_exit_vf_resume_wip(gt, vfid);
 
 		complete_all(&cs->done);
+		wake_up_all(wq);
 	}
 }
 
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
index f8604b172963e..af5952f42fff1 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
@@ -7,6 +7,7 @@
 
 #include "abi/guc_actions_sriov_abi.h"
 #include "xe_bo.h"
+#include "xe_gt_sriov_pf_control.h"
 #include "xe_gt_sriov_pf_helpers.h"
 #include "xe_gt_sriov_pf_migration.h"
 #include "xe_gt_sriov_printk.h"
@@ -15,6 +16,17 @@
 #include "xe_sriov.h"
 #include "xe_sriov_pf_migration.h"
 
+#define XE_GT_SRIOV_PF_MIGRATION_RING_TIMEOUT (HZ * 20)
+#define XE_GT_SRIOV_PF_MIGRATION_RING_SIZE 5
+
+static struct xe_gt_sriov_pf_migration *pf_pick_gt_migration(struct xe_gt *gt, unsigned int vfid)
+{
+	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+
+	return &gt->sriov.pf.vfs[vfid].migration;
+}
+
 /* Return: number of dwords saved/restored/required or a negative error code on failure */
 static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode,
 				      u64 addr, u32 ndwords)
@@ -382,6 +394,142 @@ ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int
 }
 #endif /* CONFIG_DEBUG_FS */
 
+/**
+ * xe_gt_sriov_pf_migration_ring_empty() - Check if a migration ring is empty
+ * @gt: the &struct xe_gt
+ * @vfid: the VF identifier
+ *
+ * Return: true if the ring is empty, otherwise false.
+ */
+bool xe_gt_sriov_pf_migration_ring_empty(struct xe_gt *gt, unsigned int vfid)
+{
+	return ptr_ring_empty(&pf_pick_gt_migration(gt, vfid)->ring);
+}
+
+/**
+ * xe_gt_sriov_pf_migration_ring_produce() - Add migration data packet to migration ring
+ * @gt: the &struct xe_gt
+ * @vfid: the VF identifier
+ * @data: &struct xe_sriov_pf_migration_data packet
+ *
+ * If the ring is full, wait until there is space in the ring.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_migration_ring_produce(struct xe_gt *gt, unsigned int vfid,
+					  struct xe_sriov_pf_migration_data *data)
+{
+	struct xe_gt_sriov_pf_migration *migration = pf_pick_gt_migration(gt, vfid);
+	struct wait_queue_head *wq = xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid);
+	unsigned long timeout = XE_GT_SRIOV_PF_MIGRATION_RING_TIMEOUT;
+	int ret;
+
+	xe_gt_assert(gt, data->tile == gt->tile->id);
+	xe_gt_assert(gt, data->gt == gt->info.id);
+
+	while (1) {
+		ret = ptr_ring_produce(&migration->ring, data);
+		if (ret == 0) {
+			wake_up_all(wq);
+			break;
+		}
+
+		if (!xe_gt_sriov_pf_control_check_vf_data_wip(gt, vfid))
+			return -EINVAL;
+
+		ret = wait_event_interruptible_timeout(*wq,
+						       !ptr_ring_full(&migration->ring),
+						       timeout);
+		if (ret <= 0)
+			return ret ?: -ETIMEDOUT;
+
+		timeout = ret;
+	}
+
+	return ret;
+}
+
+/**
+ * xe_gt_sriov_pf_migration_ring_consume() - Get migration data packet from migration ring
+ * @gt: the &struct xe_gt
+ * @vfid: the VF identifier
+ *
+ * If the ring is empty, wait until there are new migration data packets to process.
+ *
+ * Return: Pointer to &struct xe_sriov_pf_migration_data on success,
+ *	   ERR_PTR(-ENODATA) if ring is empty and no more migration data is expected,
+ *	   or another ERR_PTR value in case of error.
+ */
+struct xe_sriov_pf_migration_data *
+xe_gt_sriov_pf_migration_ring_consume(struct xe_gt *gt, unsigned int vfid)
+{
+	struct xe_gt_sriov_pf_migration *migration = pf_pick_gt_migration(gt, vfid);
+	struct wait_queue_head *wq = xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid);
+	unsigned long timeout = XE_GT_SRIOV_PF_MIGRATION_RING_TIMEOUT;
+	struct xe_sriov_pf_migration_data *data;
+	int ret;
+
+	while (1) {
+		data = ptr_ring_consume(&migration->ring);
+		if (data) {
+			wake_up_all(wq);
+			break;
+		}
+
+		if (!xe_gt_sriov_pf_control_check_vf_data_wip(gt, vfid))
+			return ERR_PTR(-ENODATA);
+
+		ret = wait_event_interruptible_timeout(*wq,
+					 !ptr_ring_empty(&migration->ring) ||
+					 !xe_gt_sriov_pf_control_check_vf_data_wip(gt, vfid),
+					 timeout);
+		if (ret <= 0)
+			return ERR_PTR(ret ?: -ETIMEDOUT);
+
+		timeout = ret;
+	}
+
+	return data;
+}
+
+/**
+ * xe_gt_sriov_pf_migration_ring_consume_nowait() - Get migration data packet from migration ring
+ * @gt: the &struct xe_gt
+ * @vfid: the VF identifier
+ *
+ * Similar to xe_gt_sriov_pf_migration_ring_consume(), but doesn't wait until more data is available.
+ *
+ * Return: Pointer to &struct xe_sriov_pf_migration_data on success,
+ *	   ERR_PTR(-EAGAIN) if ring is empty but migration data is expected,
+ *	   ERR_PTR(-ENODATA) if ring is empty and no more migration data is expected,
+ *	   or another ERR_PTR value in case of error.
+ */
+struct xe_sriov_pf_migration_data *
+xe_gt_sriov_pf_migration_ring_consume_nowait(struct xe_gt *gt, unsigned int vfid)
+{
+	struct xe_gt_sriov_pf_migration *migration = pf_pick_gt_migration(gt, vfid);
+	struct wait_queue_head *wq = xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid);
+	struct xe_sriov_pf_migration_data *data;
+
+	data = ptr_ring_consume(&migration->ring);
+	if (data) {
+		wake_up_all(wq);
+		return data;
+	}
+
+	if (!xe_gt_sriov_pf_control_check_vf_data_wip(gt, vfid))
+		return ERR_PTR(-ENODATA);
+
+	return ERR_PTR(-EAGAIN);
+}
+
+static void pf_gt_migration_cleanup(struct drm_device *dev, void *arg)
+{
+	struct xe_gt_sriov_pf_migration *migration = arg;
+
+	ptr_ring_cleanup(&migration->ring, NULL);
+}
+
 /**
  * xe_gt_sriov_pf_migration_init() - Initialize support for VF migration.
  * @gt: the &xe_gt
@@ -393,6 +541,7 @@ ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int
 int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)
 {
 	struct xe_device *xe = gt_to_xe(gt);
+	unsigned int n, totalvfs;
 	int err;
 
 	xe_gt_assert(gt, IS_SRIOV_PF(xe));
@@ -404,5 +553,19 @@ int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)
 	if (err)
 		return err;
 
+	totalvfs = xe_sriov_pf_get_totalvfs(xe);
+	for (n = 0; n <= totalvfs; n++) {
+		struct xe_gt_sriov_pf_migration *migration = pf_pick_gt_migration(gt, n);
+
+		err = ptr_ring_init(&migration->ring,
+				    XE_GT_SRIOV_PF_MIGRATION_RING_SIZE, GFP_KERNEL);
+		if (err)
+			return err;
+
+		err = drmm_add_action_or_reset(&xe->drm, pf_gt_migration_cleanup, migration);
+		if (err)
+			return err;
+	}
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
index 09faeae00ddbb..1e4dc46413823 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
@@ -9,11 +9,20 @@
 #include <linux/types.h>
 
 struct xe_gt;
+struct xe_sriov_pf_migration_data;
 
 int xe_gt_sriov_pf_migration_init(struct xe_gt *gt);
 int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid);
 int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid);
 
+bool xe_gt_sriov_pf_migration_ring_empty(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_migration_ring_produce(struct xe_gt *gt, unsigned int vfid,
+					  struct xe_sriov_pf_migration_data *data);
+struct xe_sriov_pf_migration_data *
+xe_gt_sriov_pf_migration_ring_consume(struct xe_gt *gt, unsigned int vfid);
+struct xe_sriov_pf_migration_data *
+xe_gt_sriov_pf_migration_ring_consume_nowait(struct xe_gt *gt, unsigned int vfid);
+
 #ifdef CONFIG_DEBUG_FS
 ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
 						char __user *buf, size_t count, loff_t *pos);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
index fdc5a31dd8989..8434689372082 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
@@ -7,6 +7,7 @@
 #define _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
 
 #include <linux/mutex.h>
+#include <linux/ptr_ring.h>
 #include <linux/types.h>
 
 /**
@@ -27,9 +28,11 @@ struct xe_gt_sriov_state_snapshot {
 /**
  * struct xe_gt_sriov_pf_migration - GT-level data.
  *
- * Used by the PF driver to maintain non-VF specific per-GT data.
+ * Used by the PF driver to maintain per-VF migration data.
  */
 struct xe_gt_sriov_pf_migration {
+	/** @ring: queue containing VF save / restore migration data */
+	struct ptr_ring ring;
 };
 
 #endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
index 9a856da379d39..fbb08f8030f7f 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
@@ -33,6 +33,9 @@ struct xe_gt_sriov_metadata {
 
 	/** @snapshot: snapshot of the VF state data */
 	struct xe_gt_sriov_state_snapshot snapshot;
+
+	/** @migration: migration data */
+	struct xe_gt_sriov_pf_migration migration;
 };
 
 /**
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
index cf6a210d5597a..347682f29a03c 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
@@ -4,7 +4,35 @@
  */
 
 #include "xe_sriov.h"
+#include <drm/drm_managed.h>
+
+#include "xe_device.h"
+#include "xe_gt_sriov_pf_control.h"
+#include "xe_gt_sriov_pf_migration.h"
+#include "xe_pm.h"
+#include "xe_sriov_pf_helpers.h"
 #include "xe_sriov_pf_migration.h"
+#include "xe_sriov_printk.h"
+
+static struct xe_sriov_pf_migration *pf_pick_migration(struct xe_device *xe, unsigned int vfid)
+{
+	xe_assert(xe, IS_SRIOV_PF(xe));
+	xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
+
+	return &xe->sriov.pf.vfs[vfid].migration;
+}
+
+/**
+ * xe_sriov_pf_migration_waitqueue() - Get waitqueue for migration
+ * @xe: the &struct xe_device
+ * @vfid: the VF identifier
+ *
+ * Return: pointer to the migration waitqueue.
+ */
+wait_queue_head_t *xe_sriov_pf_migration_waitqueue(struct xe_device *xe, unsigned int vfid)
+{
+	return &pf_pick_migration(xe, vfid)->wq;
+}
 
 /**
  * xe_sriov_pf_migration_supported() - Check if SR-IOV VF migration is supported by the device
@@ -35,9 +63,128 @@ static bool pf_check_migration_support(struct xe_device *xe)
  */
 int xe_sriov_pf_migration_init(struct xe_device *xe)
 {
+	unsigned int n, totalvfs;
+
 	xe_assert(xe, IS_SRIOV_PF(xe));
 
 	xe->sriov.pf.migration.supported = pf_check_migration_support(xe);
+
+	/*
+	 * Initialize the waitqueues even when migration is not supported,
+	 * since the control state machine wakes them unconditionally on
+	 * WIP exit.
+	 */
+	totalvfs = xe_sriov_pf_get_totalvfs(xe);
+	for (n = 1; n <= totalvfs; n++) {
+		struct xe_sriov_pf_migration *migration = pf_pick_migration(xe, n);
+
+		init_waitqueue_head(&migration->wq);
+	}
 
 	return 0;
 }
+
+static bool pf_migration_empty(struct xe_device *xe, unsigned int vfid)
+{
+	struct xe_gt *gt;
+	u8 gt_id;
+
+	for_each_gt(gt, xe, gt_id) {
+		if (!xe_gt_sriov_pf_migration_ring_empty(gt, vfid))
+			return false;
+	}
+
+	return true;
+}
+
+static struct xe_sriov_pf_migration_data *
+pf_migration_consume(struct xe_device *xe, unsigned int vfid)
+{
+	struct xe_sriov_pf_migration_data *data;
+	struct xe_gt *gt;
+	u8 gt_id;
+	bool no_data = true;
+
+	for_each_gt(gt, xe, gt_id) {
+		data = xe_gt_sriov_pf_migration_ring_consume_nowait(gt, vfid);
+
+		if (!IS_ERR(data))
+			return data;
+		if (PTR_ERR(data) == -EAGAIN)
+			no_data = false;
+	}
+
+	if (no_data)
+		return ERR_PTR(-ENODATA);
+
+	return ERR_PTR(-EAGAIN);
+}
+
+/**
+ * xe_sriov_pf_migration_consume() - Consume a SR-IOV VF migration data packet from the device
+ * @xe: the &struct xe_device
+ * @vfid: the VF identifier
+ *
+ * If there is no migration data to process, wait until more data is available.
+ *
+ * Return: Pointer to &struct xe_sriov_pf_migration_data on success,
+ *	   ERR_PTR(-ENODATA) if ring is empty and no more migration data is expected,
+ *	   or another ERR_PTR value in case of error.
+ */
+struct xe_sriov_pf_migration_data *
+xe_sriov_pf_migration_consume(struct xe_device *xe, unsigned int vfid)
+{
+	struct xe_sriov_pf_migration *migration = pf_pick_migration(xe, vfid);
+	unsigned long timeout = HZ * 5;
+	struct xe_sriov_pf_migration_data *data;
+	int ret;
+
+	if (!IS_SRIOV_PF(xe))
+		return ERR_PTR(-ENODEV);
+
+	while (1) {
+		data = pf_migration_consume(xe, vfid);
+		if (!IS_ERR(data) || PTR_ERR(data) != -EAGAIN)
+			goto out;
+
+		ret = wait_event_interruptible_timeout(migration->wq,
+						       !pf_migration_empty(xe, vfid),
+						       timeout);
+		if (ret < 0)
+			return ERR_PTR(ret);
+		if (ret == 0) {
+			xe_sriov_warn(xe, "VF%d Timed out waiting for migration data\n", vfid);
+			return ERR_PTR(-ETIMEDOUT);
+		}
+
+		timeout = ret;
+	}
+
+out:
+	return data;
+}
+
+/**
+ * xe_sriov_pf_migration_produce() - Produce a SR-IOV VF migration data packet for the device to process
+ * @xe: the &struct xe_device
+ * @vfid: the VF identifier
+ * @data: VF migration data
+ *
+ * If the underlying data structure is full, wait until there is space.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_migration_produce(struct xe_device *xe, unsigned int vfid,
+				  struct xe_sriov_pf_migration_data *data)
+{
+	struct xe_gt *gt;
+
+	if (!IS_SRIOV_PF(xe))
+		return -ENODEV;
+
+	gt = xe_device_get_gt(xe, data->gt);
+	if (!gt || data->tile != gt->tile->id) {
+		xe_sriov_err_ratelimited(xe, "VF%d Unknown GT - tile_id:%d, gt_id:%d\n",
+					 vfid, data->tile, data->gt);
+		return -EINVAL;
+	}
+
+	return xe_gt_sriov_pf_migration_ring_produce(gt, vfid, data);
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration.h
index d3058b6682192..f2020ba19c2da 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_migration.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.h
@@ -7,12 +7,18 @@
 #define _XE_SRIOV_PF_MIGRATION_H_
 
 #include <linux/types.h>
+#include <linux/wait.h>
 
 struct xe_device;
 
 #ifdef CONFIG_PCI_IOV
 int xe_sriov_pf_migration_init(struct xe_device *xe);
 bool xe_sriov_pf_migration_supported(struct xe_device *xe);
+struct xe_sriov_pf_migration_data *
+xe_sriov_pf_migration_consume(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_pf_migration_produce(struct xe_device *xe, unsigned int vfid,
+				  struct xe_sriov_pf_migration_data *data);
+wait_queue_head_t *xe_sriov_pf_migration_waitqueue(struct xe_device *xe, unsigned int vfid);
 #else
 static inline int xe_sriov_pf_migration_init(struct xe_device *xe)
 {
@@ -22,6 +28,20 @@ static inline bool xe_sriov_pf_migration_supported(struct xe_device *xe)
 {
 	return false;
 }
+static inline struct xe_sriov_pf_migration_data *
+xe_sriov_pf_migration_consume(struct xe_device *xe, unsigned int vfid)
+{
+	return ERR_PTR(-ENODEV);
+}
+static inline int xe_sriov_pf_migration_produce(struct xe_device *xe, unsigned int vfid,
+						struct xe_sriov_pf_migration_data *data)
+{
+	return -ENODEV;
+}
+static inline wait_queue_head_t *
+xe_sriov_pf_migration_waitqueue(struct xe_device *xe, unsigned int vfid)
+{
+	return ERR_PTR(-ENODEV);
+}
 #endif
 
 #endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h
index e69de29bb2d1d..80fdea32b884a 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_MIGRATION_TYPES_H_
+#define _XE_SRIOV_PF_MIGRATION_TYPES_H_
+
+#include <linux/types.h>
+#include <linux/wait.h>
+
+struct xe_sriov_pf_migration_data {
+	struct xe_device *xe;
+	void *vaddr;
+	size_t remaining;
+	size_t hdr_remaining;
+	union {
+		struct xe_bo *bo;
+		void *buff;
+	};
+	__struct_group(xe_sriov_pf_migration_hdr, hdr, __packed,
+		u8 version;
+		u8 type;
+		u8 tile;
+		u8 gt;
+		u32 flags;
+		u64 offset;
+		u64 size;
+	);
+};
+
+struct xe_sriov_pf_migration {
+	/** @wq: waitqueue used to avoid busy-waiting for snapshot production/consumption */
+	wait_queue_head_t wq;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_types.h
index 2d2fcc0a2f258..b3ae21a5a0490 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_types.h
@@ -9,6 +9,7 @@
 #include <linux/mutex.h>
 #include <linux/types.h>
 
+#include "xe_sriov_pf_migration_types.h"
 #include "xe_sriov_pf_service_types.h"
 
 /**
@@ -17,6 +18,8 @@
 struct xe_sriov_metadata {
 	/** @version: negotiated VF/PF ABI version */
 	struct xe_sriov_pf_service_version version;
+	/** @migration: migration data */
+	struct xe_sriov_pf_migration migration;
 };
 
 /**
-- 
2.50.1

