Message-ID: <lpsgfkendbj3p3vuwy3ifjt63jqvfovuc4uc4qiv3rjiqqqhfb@owshpnxntkvt>
Date: Tue, 28 Oct 2025 14:04:54 +0100
From: Michał Winiarski <michal.winiarski@...el.com>
To: Michal Wajdeczko <michal.wajdeczko@...el.com>
CC: Alex Williamson <alex.williamson@...hat.com>, Lucas De Marchi
<lucas.demarchi@...el.com>, Thomas Hellström
<thomas.hellstrom@...ux.intel.com>, Rodrigo Vivi <rodrigo.vivi@...el.com>,
Jason Gunthorpe <jgg@...pe.ca>, Yishai Hadas <yishaih@...dia.com>, Kevin Tian
<kevin.tian@...el.com>, <intel-xe@...ts.freedesktop.org>,
<linux-kernel@...r.kernel.org>, <kvm@...r.kernel.org>, Matthew Brost
<matthew.brost@...el.com>, <dri-devel@...ts.freedesktop.org>, Jani Nikula
<jani.nikula@...ux.intel.com>, Joonas Lahtinen
<joonas.lahtinen@...ux.intel.com>, Tvrtko Ursulin <tursulin@...ulin.net>,
David Airlie <airlied@...il.com>, Simona Vetter <simona@...ll.ch>, "Lukasz
Laguna" <lukasz.laguna@...el.com>
Subject: Re: [PATCH v2 15/26] drm/xe/pf: Handle GuC migration data as part of
PF control
On Thu, Oct 23, 2025 at 10:39:12PM +0200, Michal Wajdeczko wrote:
>
>
> On 10/22/2025 12:41 AM, Michał Winiarski wrote:
> > Connect the helpers to allow save and restore of GuC migration data in
> > stop_copy / resume device state.
> >
> > Signed-off-by: Michał Winiarski <michal.winiarski@...el.com>
> > ---
> > drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c | 26 +++++++++++++++++--
> > .../gpu/drm/xe/xe_gt_sriov_pf_control_types.h | 2 ++
> > drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 9 ++++++-
> > 3 files changed, 34 insertions(+), 3 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
> > index c159f35adcbe7..18f6e3028d4f0 100644
> > --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
> > +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
> > @@ -188,6 +188,7 @@ static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit)
> > CASE2STR(SAVE_WIP);
> > CASE2STR(SAVE_PROCESS_DATA);
> > CASE2STR(SAVE_WAIT_DATA);
> > + CASE2STR(SAVE_DATA_GUC);
> > CASE2STR(SAVE_DATA_DONE);
> > CASE2STR(SAVE_FAILED);
> > CASE2STR(SAVED);
> > @@ -343,6 +344,7 @@ static void pf_exit_vf_mismatch(struct xe_gt *gt, unsigned int vfid)
> > pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_FAILED);
> > pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED);
> > pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED);
> > + pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED);
>
> this should be in one of the previous patches
It is - note that we're exiting this state twice :)
It's a leftover from previous revisions (at some point we were
introducing the FAILED state here). I'll remove it.
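For clarity, with the leftover dropped, the failed-state cleanup in
pf_exit_vf_mismatch() simply goes back to what the earlier patch already has
(assuming nothing else moves around):

	pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_FAILED);
	pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED);
	pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED);
	pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED);
	pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED);
	pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED);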
>
> > pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED);
> > pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED);
> > pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED);
> > @@ -824,6 +826,7 @@ static void pf_exit_vf_save_wip(struct xe_gt *gt, unsigned int vfid)
> >
> > pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA);
> > pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA);
> > + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_GUC);
> > pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE);
> > }
> > }
> > @@ -848,6 +851,16 @@ static void pf_enter_vf_save_failed(struct xe_gt *gt, unsigned int vfid)
> >
> > static int pf_handle_vf_save_data(struct xe_gt *gt, unsigned int vfid)
> > {
> > + int ret;
> > +
> > + if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_GUC)) {
> > + xe_gt_assert(gt, xe_gt_sriov_pf_migration_guc_size(gt, vfid) > 0);
> > +
> > + ret = xe_gt_sriov_pf_migration_guc_save(gt, vfid);
> > + if (ret)
> > + return ret;
> > + }
> > +
> > return 0;
> > }
> >
> > @@ -881,6 +894,7 @@ static bool pf_enter_vf_save_wip(struct xe_gt *gt, unsigned int vfid)
> > {
> > if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) {
> > pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA);
> > + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_GUC);
> > pf_enter_vf_wip(gt, vfid);
> > pf_queue_vf(gt, vfid);
> > return true;
> > @@ -1046,14 +1060,22 @@ static int
> > pf_handle_vf_restore_data(struct xe_gt *gt, unsigned int vfid)
> > {
> > struct xe_sriov_migration_data *data = xe_gt_sriov_pf_migration_restore_consume(gt, vfid);
> > + int ret = 0;
> >
> > xe_gt_assert(gt, data);
> >
> > - xe_gt_sriov_notice(gt, "Skipping VF%u unknown data type: %d\n", vfid, data->type);
> > + switch (data->type) {
> > + case XE_SRIOV_MIGRATION_DATA_TYPE_GUC:
> > + ret = xe_gt_sriov_pf_migration_guc_restore(gt, vfid, data);
> > + break;
> > + default:
> > + xe_gt_sriov_notice(gt, "Skipping VF%u unknown data type: %d\n", vfid, data->type);
> > + break;
> > + }
> >
> > xe_sriov_migration_data_free(data);
> >
> > - return 0;
> > + return ret;
> > }
> >
> > static bool pf_handle_vf_restore(struct xe_gt *gt, unsigned int vfid)
> > diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
> > index 35ceb2ff62110..8b951ee8a24fe 100644
> > --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
> > +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
> > @@ -33,6 +33,7 @@
> > * @XE_GT_SRIOV_STATE_SAVE_WIP: indicates that VF save operation is in progress.
> > * @XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA: indicates that VF migration data is being produced.
> > * @XE_GT_SRIOV_STATE_SAVE_WAIT_DATA: indicates that PF awaits for space in migration data ring.
> > + * @XE_GT_SRIOV_STATE_SAVE_DATA_GUC: indicates PF needs to save VF GuC migration data.
> > * @XE_GT_SRIOV_STATE_SAVE_DATA_DONE: indicates that all migration data was produced by Xe.
> > * @XE_GT_SRIOV_STATE_SAVE_FAILED: indicates that VF save operation has failed.
> > * @XE_GT_SRIOV_STATE_SAVED: indicates that VF data is saved.
> > @@ -76,6 +77,7 @@ enum xe_gt_sriov_control_bits {
> > XE_GT_SRIOV_STATE_SAVE_WIP,
> > XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA,
> > XE_GT_SRIOV_STATE_SAVE_WAIT_DATA,
> > + XE_GT_SRIOV_STATE_SAVE_DATA_GUC,
>
> as DATA_GUC and the later-introduced DATA_GGTT/MMIO/VRAM are kind of sub-states of PROCESS_DATA,
> it's better to keep them together
>
> XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA,
> XE_GT_SRIOV_STATE_SAVE_DATA_GUC,
> XE_GT_SRIOV_STATE_SAVE_DATA_GGTT,
> XE_GT_SRIOV_STATE_SAVE_DATA_MMIO,
> XE_GT_SRIOV_STATE_SAVE_DATA_VRAM,
> XE_GT_SRIOV_STATE_SAVE_DATA_DONE,
> XE_GT_SRIOV_STATE_SAVE_WAIT_CONSUME,
>
> and at some point you need to update the state diagram to include those DATA states
I'll extract this out of the control state machine, as it's conceptually
similar to save_vram_offset introduced later in the series.
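For illustration, once it's out of the control state machine, the GuC step in
pf_handle_vf_save_data() could be gated by per-VF migration state instead of a
control bit - a rough sketch only, where the guc_data_produced flag and its
lookup path are hypothetical placeholders for whatever the reworked series
ends up using:

static int pf_handle_vf_save_data(struct xe_gt *gt, unsigned int vfid)
{
	/* hypothetical per-VF flag and lookup replacing XE_GT_SRIOV_STATE_SAVE_DATA_GUC */
	bool *produced = &gt->sriov.pf.vfs[vfid].migration.guc_data_produced;
	int ret;

	if (!*produced) {
		xe_gt_assert(gt, xe_gt_sriov_pf_migration_guc_size(gt, vfid) > 0);

		ret = xe_gt_sriov_pf_migration_guc_save(gt, vfid);
		if (ret)
			return ret;

		*produced = true;
	}

	return 0;
}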
>
> > XE_GT_SRIOV_STATE_SAVE_DATA_DONE,
> > XE_GT_SRIOV_STATE_SAVE_FAILED,
> > XE_GT_SRIOV_STATE_SAVED,
> > diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> > index 127162e8c66e8..594178fbe36d0 100644
> > --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> > +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> > @@ -279,10 +279,17 @@ int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid,
> > ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid)
> > {
> > ssize_t total = 0;
> > + ssize_t size;
> >
> > xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> >
> > - /* Nothing to query yet - will be updated once per-GT migration data types are added */
> > + size = xe_gt_sriov_pf_migration_guc_size(gt, vfid);
> > + if (size < 0)
> > + return size;
> > + else if (size > 0)
>
> "else" not needed
Ok.
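Something like this then (the same hunk with the else dropped, nothing else
changed):

	size = xe_gt_sriov_pf_migration_guc_size(gt, vfid);
	if (size < 0)
		return size;
	if (size > 0)
		size += sizeof(struct xe_sriov_pf_migration_hdr);
	total += size;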
Thanks,
-Michał
> > + size += sizeof(struct xe_sriov_pf_migration_hdr);
> > + total += size;
> > +
> > return total;
> > }
> >
>