Message-ID: <efde06ef-1ce4-4bc7-a0ca-01a27b697ae0@intel.com>
Date: Mon, 3 Nov 2025 19:30:36 +0100
From: Michal Wajdeczko <michal.wajdeczko@...el.com>
To: Michał Winiarski <michal.winiarski@...el.com>, "Alex
Williamson" <alex@...zbot.org>, Lucas De Marchi <lucas.demarchi@...el.com>,
Thomas Hellström <thomas.hellstrom@...ux.intel.com>,
"Rodrigo Vivi" <rodrigo.vivi@...el.com>, Jason Gunthorpe <jgg@...pe.ca>,
Yishai Hadas <yishaih@...dia.com>, Kevin Tian <kevin.tian@...el.com>, Shameer
Kolothum <skolothumtho@...dia.com>, <intel-xe@...ts.freedesktop.org>,
<linux-kernel@...r.kernel.org>, <kvm@...r.kernel.org>, Matthew Brost
<matthew.brost@...el.com>
CC: <dri-devel@...ts.freedesktop.org>, Jani Nikula
<jani.nikula@...ux.intel.com>, Joonas Lahtinen
<joonas.lahtinen@...ux.intel.com>, Tvrtko Ursulin <tursulin@...ulin.net>,
David Airlie <airlied@...il.com>, Simona Vetter <simona@...ll.ch>, "Lukasz
Laguna" <lukasz.laguna@...el.com>, Christoph Hellwig <hch@...radead.org>
Subject: Re: [PATCH v3 15/28] drm/xe/pf: Switch VF migration GuC save/restore
to struct migration data
On 10/30/2025 9:31 PM, Michał Winiarski wrote:
> In upcoming changes, the GuC VF migration data will be handled as part
> of separate SAVE/RESTORE states in VF control state machine.
> Now that the data is decoupled from both guc_state debugfs and PAUSE
> state, we can safely remove the struct xe_gt_sriov_state_snapshot and
> modify the GuC save/restore functions to operate on struct
> xe_sriov_migration_data.
hmm, that reminded me that maybe instead of
xe_sriov_migration_data
a better name for this "data" struct could be
xe_sriov_migration_packet
to make it more distinguishable from
xe_gt_sriov_migration_data
which has a completely different usage
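i.e. at the usage sites it would then read like (hypothetical rename,
just to illustrate the naming):

-	struct xe_sriov_migration_data *data;
+	struct xe_sriov_migration_packet *packet;

while xe_gt_sriov_migration_data (the GT-level per-VF ptr_ring holder)
would keep its current name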
>
> Signed-off-by: Michał Winiarski <michal.winiarski@...el.com>
> ---
> drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 265 +++++-------------
> drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h | 13 +-
> .../drm/xe/xe_gt_sriov_pf_migration_types.h | 27 --
> drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h | 4 -
> 4 files changed, 79 insertions(+), 230 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> index a2db127982638..4a716e0a29fe4 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> @@ -28,6 +28,17 @@ static struct xe_gt_sriov_migration_data *pf_pick_gt_migration(struct xe_gt *gt,
> return &gt->sriov.pf.vfs[vfid].migration;
> }
>
> +static void pf_dump_mig_data(struct xe_gt *gt, unsigned int vfid,
> + struct xe_sriov_migration_data *data)
> +{
> + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
> + print_hex_dump_bytes("mig_hdr: ", DUMP_PREFIX_OFFSET,
> + &data->hdr, sizeof(data->hdr));
> + print_hex_dump_bytes("mig_data: ", DUMP_PREFIX_OFFSET,
> + data->vaddr, min(SZ_64, data->size));
> + }
nit: maybe this function should be based on drm_printer/drm_print_hex_dump,
then we would get the proper GTn: prefix
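something along these lines maybe (untested sketch; assuming that
xe_gt_dbg_printer() from xe_gt_printk.h and drm_print_hex_dump() from
drm_print.h can be used here):

static void pf_dump_mig_data(struct xe_gt *gt, unsigned int vfid,
			     struct xe_sriov_migration_data *data)
{
	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		/* GT-aware printer adds the "GTn:" prefix for us */
		struct drm_printer p = xe_gt_dbg_printer(gt);

		drm_print_hex_dump(&p, "mig_hdr: ", (const u8 *)&data->hdr,
				   sizeof(data->hdr));
		drm_print_hex_dump(&p, "mig_data: ", data->vaddr,
				   min(SZ_64, data->size));
	}
}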
> +}
> +
> /* Return: number of dwords saved/restored/required or a negative error code on failure */
> static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode,
> u64 addr, u32 ndwords)
> @@ -47,7 +58,7 @@ static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode,
> }
>
> /* Return: size of the state in dwords or a negative error code on failure */
> -static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid)
> +static int pf_send_guc_query_vf_mig_data_size(struct xe_gt *gt, unsigned int vfid)
> {
> int ret;
>
> @@ -56,8 +67,8 @@ static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid)
> }
>
> /* Return: number of state dwords saved or a negative error code on failure */
> -static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid,
> - void *dst, size_t size)
> +static int pf_send_guc_save_vf_mig_data(struct xe_gt *gt, unsigned int vfid,
> + void *dst, size_t size)
> {
> const int ndwords = size / sizeof(u32);
> struct xe_guc *guc = &gt->uc.guc;
> @@ -85,8 +96,8 @@ static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid,
> }
>
> /* Return: number of state dwords restored or a negative error code on failure */
> -static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid,
> - const void *src, size_t size)
> +static int pf_send_guc_restore_vf_mig_data(struct xe_gt *gt, unsigned int vfid,
> + const void *src, size_t size)
> {
> const int ndwords = size / sizeof(u32);
> struct xe_guc *guc = &gt->uc.guc;
> @@ -114,120 +125,67 @@ static bool pf_migration_supported(struct xe_gt *gt)
> return xe_sriov_pf_migration_supported(gt_to_xe(gt));
> }
>
> -static struct mutex *pf_migration_mutex(struct xe_gt *gt)
> +static int pf_save_vf_guc_mig_data(struct xe_gt *gt, unsigned int vfid)
> {
> - xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> - return &gt->sriov.pf.migration.snapshot_lock;
> -}
> -
> -static struct xe_gt_sriov_state_snapshot *pf_pick_vf_snapshot(struct xe_gt *gt,
> - unsigned int vfid)
> -{
> - xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> - xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
> - lockdep_assert_held(pf_migration_mutex(gt));
> -
> - return &gt->sriov.pf.vfs[vfid].snapshot;
> -}
> -
> -static unsigned int pf_snapshot_index(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
> -{
> - return container_of(snapshot, struct xe_gt_sriov_metadata, snapshot) - gt->sriov.pf.vfs;
> -}
> -
> -static void pf_free_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
> -{
> - struct xe_device *xe = gt_to_xe(gt);
> -
> - drmm_kfree(&xe->drm, snapshot->guc.buff);
> - snapshot->guc.buff = NULL;
> - snapshot->guc.size = 0;
> -}
> -
> -static int pf_alloc_guc_state(struct xe_gt *gt,
> - struct xe_gt_sriov_state_snapshot *snapshot,
> - size_t size)
> -{
> - struct xe_device *xe = gt_to_xe(gt);
> - void *p;
> -
> - pf_free_guc_state(gt, snapshot);
> -
> - if (!size)
> - return -ENODATA;
> -
> - if (size % sizeof(u32))
> - return -EINVAL;
> -
> - if (size > SZ_2M)
> - return -EFBIG;
> -
> - p = drmm_kzalloc(&xe->drm, size, GFP_KERNEL);
> - if (!p)
> - return -ENOMEM;
> -
> - snapshot->guc.buff = p;
> - snapshot->guc.size = size;
> - return 0;
> -}
> -
> -static void pf_dump_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
> -{
> - if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
> - unsigned int vfid __maybe_unused = pf_snapshot_index(gt, snapshot);
> -
> - xe_gt_sriov_dbg_verbose(gt, "VF%u GuC state is %zu dwords:\n",
> - vfid, snapshot->guc.size / sizeof(u32));
> - print_hex_dump_bytes("state: ", DUMP_PREFIX_OFFSET,
> - snapshot->guc.buff, min(SZ_64, snapshot->guc.size));
> - }
> -}
> -
> -static int pf_save_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
> -{
> - struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid);
> + struct xe_sriov_migration_data *data;
> size_t size;
> int ret;
>
> - ret = pf_send_guc_query_vf_state_size(gt, vfid);
> + ret = pf_send_guc_query_vf_mig_data_size(gt, vfid);
> if (ret < 0)
> goto fail;
> +
> size = ret * sizeof(u32);
> - xe_gt_sriov_dbg_verbose(gt, "VF%u state size is %d dwords (%zu bytes)\n", vfid, ret, size);
>
> - ret = pf_alloc_guc_state(gt, snapshot, size);
> - if (ret < 0)
> + data = xe_sriov_migration_data_alloc(gt_to_xe(gt));
> + if (!data) {
> + ret = -ENOMEM;
> goto fail;
> + }
> +
> + ret = xe_sriov_migration_data_init(data, gt->tile->id, gt->info.id,
> + XE_SRIOV_MIGRATION_DATA_TYPE_GUC, 0, size);
> + if (ret)
> + goto fail_free;
>
> - ret = pf_send_guc_save_vf_state(gt, vfid, snapshot->guc.buff, size);
> + ret = pf_send_guc_save_vf_mig_data(gt, vfid, data->vaddr, size);
> if (ret < 0)
> - goto fail;
> + goto fail_free;
> size = ret * sizeof(u32);
> xe_gt_assert(gt, size);
> - xe_gt_assert(gt, size <= snapshot->guc.size);
> - snapshot->guc.size = size;
> + xe_gt_assert(gt, size <= data->size);
> + data->size = size;
> + data->remaining = size;
> +
> + xe_gt_sriov_dbg_verbose(gt, "VF%u GuC data save (%zu bytes)\n", vfid, size);
> + pf_dump_mig_data(gt, vfid, data);
as already commented elsewhere, these two lines always go together, so we
could combine them into an improved pf_dump_mig_data(gt, vfid, data, what)
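rough sketch of what I have in mind (the "what" strings at the call sites
are just a proposal):

static void pf_dump_mig_data(struct xe_gt *gt, unsigned int vfid,
			     struct xe_sriov_migration_data *data,
			     const char *what)
{
	xe_gt_sriov_dbg_verbose(gt, "VF%u GuC data %s (%llu bytes)\n",
				vfid, what, data->size);

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		print_hex_dump_bytes("mig_hdr: ", DUMP_PREFIX_OFFSET,
				     &data->hdr, sizeof(data->hdr));
		print_hex_dump_bytes("mig_data: ", DUMP_PREFIX_OFFSET,
				     data->vaddr, min(SZ_64, data->size));
	}
}

then the save path above becomes just:

	pf_dump_mig_data(gt, vfid, data, "save");

and the restore path below:

	pf_dump_mig_data(gt, vfid, data, "restore");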
> +
> + ret = xe_gt_sriov_pf_migration_save_produce(gt, vfid, data);
> + if (ret)
> + goto fail_free;
>
> - pf_dump_guc_state(gt, snapshot);
> return 0;
>
> +fail_free:
> + xe_sriov_migration_data_free(data);
> fail:
> - xe_gt_sriov_dbg(gt, "Unable to save VF%u state (%pe)\n", vfid, ERR_PTR(ret));
> - pf_free_guc_state(gt, snapshot);
> + xe_gt_sriov_err(gt, "Failed to save VF%u GuC data (%pe)\n",
> + vfid, ERR_PTR(ret));
> return ret;
> }
>
> /**
> - * xe_gt_sriov_pf_migration_save_guc_state() - Take a GuC VF state snapshot.
> + * xe_gt_sriov_pf_migration_guc_size() - Get the size of VF GuC migration data.
> * @gt: the &xe_gt
> * @vfid: the VF identifier
> *
> * This function is for PF only.
> *
> - * Return: 0 on success or a negative error code on failure.
> + * Return: size in bytes or a negative error code on failure.
> */
> -int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid)
> +ssize_t xe_gt_sriov_pf_migration_guc_size(struct xe_gt *gt, unsigned int vfid)
> {
> - int err;
> + ssize_t size;
>
> xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> xe_gt_assert(gt, vfid != PFID);
> @@ -236,37 +194,15 @@ int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid)
> if (!pf_migration_supported(gt))
> return -ENOPKG;
>
> - mutex_lock(pf_migration_mutex(gt));
> - err = pf_save_vf_guc_state(gt, vfid);
> - mutex_unlock(pf_migration_mutex(gt));
> -
> - return err;
> -}
> -
> -static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
> -{
> - struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid);
> - int ret;
> -
> - if (!snapshot->guc.size)
> - return -ENODATA;
> -
> - xe_gt_sriov_dbg_verbose(gt, "restoring %zu dwords of VF%u GuC state\n",
> - snapshot->guc.size / sizeof(u32), vfid);
> - ret = pf_send_guc_restore_vf_state(gt, vfid, snapshot->guc.buff, snapshot->guc.size);
> - if (ret < 0)
> - goto fail;
> -
> - xe_gt_sriov_dbg_verbose(gt, "restored %d dwords of VF%u GuC state\n", ret, vfid);
> - return 0;
> + size = pf_send_guc_query_vf_mig_data_size(gt, vfid);
> + if (size >= 0)
> + size *= sizeof(u32);
>
> -fail:
> - xe_gt_sriov_dbg(gt, "Failed to restore VF%u GuC state (%pe)\n", vfid, ERR_PTR(ret));
> - return ret;
> + return size;
> }
>
> /**
> - * xe_gt_sriov_pf_migration_restore_guc_state() - Restore a GuC VF state.
> + * xe_gt_sriov_pf_migration_guc_save() - Save VF GuC migration data.
> * @gt: the &xe_gt
> * @vfid: the VF identifier
> *
> @@ -274,10 +210,8 @@ static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
> *
> * Return: 0 on success or a negative error code on failure.
> */
> -int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid)
> +int xe_gt_sriov_pf_migration_guc_save(struct xe_gt *gt, unsigned int vfid)
> {
> - int ret;
> -
> xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> xe_gt_assert(gt, vfid != PFID);
> xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
> @@ -285,75 +219,43 @@ int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid)
> if (!pf_migration_supported(gt))
> return -ENOPKG;
>
> - mutex_lock(pf_migration_mutex(gt));
> - ret = pf_restore_vf_guc_state(gt, vfid);
> - mutex_unlock(pf_migration_mutex(gt));
> -
> - return ret;
> + return pf_save_vf_guc_mig_data(gt, vfid);
> }
>
> -#ifdef CONFIG_DEBUG_FS
> -/**
> - * xe_gt_sriov_pf_migration_read_guc_state() - Read a GuC VF state.
> - * @gt: the &xe_gt
> - * @vfid: the VF identifier
> - * @buf: the user space buffer to read to
> - * @count: the maximum number of bytes to read
> - * @pos: the current position in the buffer
> - *
> - * This function is for PF only.
> - *
> - * This function reads up to @count bytes from the saved VF GuC state buffer
> - * at offset @pos into the user space address starting at @buf.
> - *
> - * Return: the number of bytes read or a negative error code on failure.
> - */
> -ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
> - char __user *buf, size_t count, loff_t *pos)
> +static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid,
> + struct xe_sriov_migration_data *data)
> {
> - struct xe_gt_sriov_state_snapshot *snapshot;
> - ssize_t ret;
> + int ret;
>
> - xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> - xe_gt_assert(gt, vfid != PFID);
> - xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
> + xe_gt_assert(gt, data->size);
>
> - if (!pf_migration_supported(gt))
> - return -ENOPKG;
> + xe_gt_sriov_dbg_verbose(gt, "VF%u GuC data restore (%llu bytes)\n", vfid, data->size);
> + pf_dump_mig_data(gt, vfid, data);
>
> - mutex_lock(pf_migration_mutex(gt));
> - snapshot = pf_pick_vf_snapshot(gt, vfid);
> - if (snapshot->guc.size)
> - ret = simple_read_from_buffer(buf, count, pos, snapshot->guc.buff,
> - snapshot->guc.size);
> - else
> - ret = -ENODATA;
> - mutex_unlock(pf_migration_mutex(gt));
> + ret = pf_send_guc_restore_vf_mig_data(gt, vfid, data->vaddr, data->size);
> + if (ret < 0)
> + goto fail;
> +
> + return 0;
>
> +fail:
> + xe_gt_sriov_err(gt, "Failed to restore VF%u GuC data (%pe)\n",
> + vfid, ERR_PTR(ret));
> return ret;
> }
>
> /**
> - * xe_gt_sriov_pf_migration_write_guc_state() - Write a GuC VF state.
> + * xe_gt_sriov_pf_migration_guc_restore() - Restore VF GuC migration data.
> * @gt: the &xe_gt
> * @vfid: the VF identifier
> - * @buf: the user space buffer with GuC VF state
> - * @size: the size of GuC VF state (in bytes)
> *
> * This function is for PF only.
> *
> - * This function reads @size bytes of the VF GuC state stored at user space
> - * address @buf and writes it into a internal VF state buffer.
> - *
> - * Return: the number of bytes used or a negative error code on failure.
> + * Return: 0 on success or a negative error code on failure.
> */
> -ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
> - const char __user *buf, size_t size)
> +int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid,
> + struct xe_sriov_migration_data *data)
> {
> - struct xe_gt_sriov_state_snapshot *snapshot;
> - loff_t pos = 0;
> - ssize_t ret;
> -
> xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> xe_gt_assert(gt, vfid != PFID);
> xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
> @@ -361,21 +263,8 @@ ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
> if (!pf_migration_supported(gt))
> return -ENOPKG;
>
> - mutex_lock(pf_migration_mutex(gt));
> - snapshot = pf_pick_vf_snapshot(gt, vfid);
> - ret = pf_alloc_guc_state(gt, snapshot, size);
> - if (!ret) {
> - ret = simple_write_to_buffer(snapshot->guc.buff, size, &pos, buf, size);
> - if (ret < 0)
> - pf_free_guc_state(gt, snapshot);
> - else
> - pf_dump_guc_state(gt, snapshot);
> - }
> - mutex_unlock(pf_migration_mutex(gt));
> -
> - return ret;
> + return pf_restore_vf_guc_state(gt, vfid, data);
> }
> -#endif /* CONFIG_DEBUG_FS */
>
> /**
> * xe_gt_sriov_pf_migration_size() - Total size of migration data from all components within a GT.
> @@ -599,10 +488,6 @@ int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)
> if (!pf_migration_supported(gt))
> return 0;
>
> - err = drmm_mutex_init(&xe->drm, &gt->sriov.pf.migration.snapshot_lock);
> - if (err)
> - return err;
> -
> totalvfs = xe_sriov_pf_get_totalvfs(xe);
> for (n = 1; n <= totalvfs; n++) {
> struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, n);
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
> index 4f2f2783339c3..b3c18e369df79 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
> @@ -15,8 +15,10 @@ struct xe_sriov_migration_data;
> #define XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE SZ_8M
>
> int xe_gt_sriov_pf_migration_init(struct xe_gt *gt);
> -int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid);
> -int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid);
> +ssize_t xe_gt_sriov_pf_migration_guc_size(struct xe_gt *gt, unsigned int vfid);
> +int xe_gt_sriov_pf_migration_guc_save(struct xe_gt *gt, unsigned int vfid);
> +int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid,
> + struct xe_sriov_migration_data *data);
>
> ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid);
>
> @@ -34,11 +36,4 @@ int xe_gt_sriov_pf_migration_restore_produce(struct xe_gt *gt, unsigned int vfid,
> struct xe_sriov_migration_data *
> xe_gt_sriov_pf_migration_save_consume(struct xe_gt *gt, unsigned int vfid);
>
> -#ifdef CONFIG_DEBUG_FS
> -ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
> - char __user *buf, size_t count, loff_t *pos);
> -ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
> - const char __user *buf, size_t count);
> -#endif
> -
> #endif
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
> index 84be6fac16c8b..75d8b94cbbefb 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
> @@ -6,24 +6,7 @@
> #ifndef _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
> #define _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
>
> -#include <linux/mutex.h>
> #include <linux/ptr_ring.h>
> -#include <linux/types.h>
> -
> -/**
> - * struct xe_gt_sriov_state_snapshot - GT-level per-VF state snapshot data.
> - *
> - * Used by the PF driver to maintain per-VF migration data.
> - */
> -struct xe_gt_sriov_state_snapshot {
> - /** @guc: GuC VF state snapshot */
> - struct {
> - /** @guc.buff: buffer with the VF state */
> - u32 *buff;
> - /** @guc.size: size of the buffer (must be dwords aligned) */
> - u32 size;
> - } guc;
> -};
>
> /**
> * struct xe_gt_sriov_migration_data - GT-level per-VF migration data.
> @@ -35,14 +18,4 @@ struct xe_gt_sriov_migration_data {
> struct ptr_ring ring;
> };
>
> -/**
> - * struct xe_gt_sriov_pf_migration - GT-level data.
> - *
> - * Used by the PF driver to maintain non-VF specific per-GT data.
> - */
> -struct xe_gt_sriov_pf_migration {
> - /** @snapshot_lock: protects all VFs snapshots */
> - struct mutex snapshot_lock;
> -};
> -
> #endif
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
> index 812e74d3f8f80..667b8310478d4 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
> @@ -31,9 +31,6 @@ struct xe_gt_sriov_metadata {
> /** @version: negotiated VF/PF ABI version */
> struct xe_gt_sriov_pf_service_version version;
>
> - /** @snapshot: snapshot of the VF state data */
> - struct xe_gt_sriov_state_snapshot snapshot;
> -
> /** @migration: per-VF migration data. */
> struct xe_gt_sriov_migration_data migration;
> };
> @@ -61,7 +58,6 @@ struct xe_gt_sriov_pf {
> struct xe_gt_sriov_pf_service service;
> struct xe_gt_sriov_pf_control control;
> struct xe_gt_sriov_pf_policy policy;
> - struct xe_gt_sriov_pf_migration migration;
> struct xe_gt_sriov_spare_config spare;
> struct xe_gt_sriov_metadata *vfs;
> };
otherwise LGTM, so with the dump helper improved:
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@...el.com>