Message-ID: <fded9abd-0e42-443f-b397-082a82a6733b@intel.com>
Date: Mon, 13 Oct 2025 15:28:20 +0200
From: Michal Wajdeczko <michal.wajdeczko@...el.com>
To: Michał Winiarski <michal.winiarski@...el.com>, "Alex
Williamson" <alex.williamson@...hat.com>, Lucas De Marchi
<lucas.demarchi@...el.com>, Thomas Hellström
<thomas.hellstrom@...ux.intel.com>, Rodrigo Vivi <rodrigo.vivi@...el.com>,
Jason Gunthorpe <jgg@...pe.ca>, Yishai Hadas <yishaih@...dia.com>, Kevin Tian
<kevin.tian@...el.com>, Shameer Kolothum
<shameerali.kolothum.thodi@...wei.com>, <intel-xe@...ts.freedesktop.org>,
<linux-kernel@...r.kernel.org>, <kvm@...r.kernel.org>
CC: <dri-devel@...ts.freedesktop.org>, Matthew Brost
<matthew.brost@...el.com>, Jani Nikula <jani.nikula@...ux.intel.com>, "Joonas
Lahtinen" <joonas.lahtinen@...ux.intel.com>, Tvrtko Ursulin
<tursulin@...ulin.net>, David Airlie <airlied@...il.com>, Simona Vetter
<simona@...ll.ch>, Lukasz Laguna <lukasz.laguna@...el.com>
Subject: Re: [PATCH 19/26] drm/xe/pf: Add helpers for VF MMIO migration data
handling
On 10/11/2025 9:38 PM, Michał Winiarski wrote:
> In an upcoming change, the VF MMIO migration data will be handled as
> part of the VF control state machine. Add the necessary helpers to allow
> migration data to be transferred to/from the VF MMIO registers.
>
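Just to confirm I'm reading the intended flow correctly: the control state
machine would use these helpers roughly like below, right? (hypothetical
caller sketch, all names here are invented by me):

	/* hypothetical, not part of this patch */
	static int pf_save_vf_mmio_snapshot(struct xe_gt *gt, unsigned int vfid,
					    void **data, size_t *size)
	{
		void *buf;
		int err;

		*size = xe_gt_sriov_pf_mmio_vf_size(gt, vfid);
		buf = kzalloc(*size, GFP_KERNEL);	/* needs <linux/slab.h> */
		if (!buf)
			return -ENOMEM;

		err = xe_gt_sriov_pf_mmio_vf_save(gt, vfid, buf, *size);
		if (err) {
			kfree(buf);
			return err;
		}

		*data = buf;	/* handed over to the migration data stream */
		return 0;
	}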
> Signed-off-by: Michał Winiarski <michal.winiarski@...el.com>
> ---
> drivers/gpu/drm/xe/xe_gt_sriov_pf.c | 88 +++++++++++++++++++++++++++++
> drivers/gpu/drm/xe/xe_gt_sriov_pf.h | 19 +++++++
> 2 files changed, 107 insertions(+)
>
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
> index c4dda87b47cc8..6ceb9e024e41e 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
> @@ -194,6 +194,94 @@ static void pf_clear_vf_scratch_regs(struct xe_gt *gt, unsigned int vfid)
>  	}
>  }
>
> +/**
> + * xe_gt_sriov_pf_mmio_vf_size - Get the size of VF MMIO register data.
> + * @gt: the &struct xe_gt
> + * @vfid: VF identifier
> + *
> + * Return: size in bytes.
> + */
> +size_t xe_gt_sriov_pf_mmio_vf_size(struct xe_gt *gt, unsigned int vfid)
> +{
> +	if (xe_gt_is_media_type(gt))
> +		return MED_VF_SW_FLAG_COUNT * sizeof(u32);
> +	else
> +		return VF_SW_FLAG_COUNT * sizeof(u32);
> +}
> +
> +/**
> + * xe_gt_sriov_pf_mmio_vf_save - Save VF MMIO register values to a buffer.
> + * @gt: the &struct xe_gt
> + * @vfid: VF identifier
> + * @buf: destination buffer
> + * @size: destination buffer size in bytes
> + *
> + * Return: 0 on success or a negative error code on failure.
> + */
> +int xe_gt_sriov_pf_mmio_vf_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
> +{
> +	u32 stride = pf_get_vf_regs_stride(gt_to_xe(gt));
> +	struct xe_reg scratch;
> +	u32 *regs = buf;
> +	int n, count;
> +
> +	if (size != xe_gt_sriov_pf_mmio_vf_size(gt, vfid))
> +		return -EINVAL;
> +
> +	if (xe_gt_is_media_type(gt)) {
> +		count = MED_VF_SW_FLAG_COUNT;
> +		for (n = 0; n < count; n++) {
> +			scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride);
> +			regs[n] = xe_mmio_read32(&gt->mmio, scratch);
> +		}
> +	} else {
> +		count = VF_SW_FLAG_COUNT;
> +		for (n = 0; n < count; n++) {
> +			scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride);
> +			regs[n] = xe_mmio_read32(&gt->mmio, scratch);
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +/**
> + * xe_gt_sriov_pf_mmio_vf_restore - Restore VF MMIO register values from a buffer.
> + * @gt: the &struct xe_gt
> + * @vfid: VF identifier
> + * @buf: source buffer
> + * @size: source buffer size in bytes
> + *
> + * Return: 0 on success or a negative error code on failure.
> + */
> +int xe_gt_sriov_pf_mmio_vf_restore(struct xe_gt *gt, unsigned int vfid,
> +				   const void *buf, size_t size)
> +{
> +	u32 stride = pf_get_vf_regs_stride(gt_to_xe(gt));
> +	const u32 *regs = buf;
> +	struct xe_reg scratch;
> +	int n, count;
> +
> +	if (size != xe_gt_sriov_pf_mmio_vf_size(gt, vfid))
> +		return -EINVAL;
> +
> +	if (xe_gt_is_media_type(gt)) {
> +		count = MED_VF_SW_FLAG_COUNT;
> +		for (n = 0; n < count; n++) {
> +			scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride);
> +			xe_mmio_write32(&gt->mmio, scratch, regs[n]);
> +		}
> +	} else {
> +		count = VF_SW_FLAG_COUNT;
> +		for (n = 0; n < count; n++) {
> +			scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride);
> +			xe_mmio_write32(&gt->mmio, scratch, regs[n]);
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> /**
> * xe_gt_sriov_pf_sanitize_hw() - Reset hardware state related to a VF.
> * @gt: the &xe_gt
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
> index e7fde3f9937af..5e5f31d943d89 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
> @@ -6,6 +6,8 @@
> #ifndef _XE_GT_SRIOV_PF_H_
> #define _XE_GT_SRIOV_PF_H_
>
> +#include <linux/types.h>
likely <linux/errno.h> will also be needed if you want to keep the stubs (but double-check whether those are really needed)
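i.e. something like:

	#include <linux/errno.h>
	#include <linux/types.h>

(assuming the stubs stay and keep returning -ENODEV)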
> +
> struct xe_gt;
>
> #ifdef CONFIG_PCI_IOV
> @@ -16,6 +18,10 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
> void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
> void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt);
> void xe_gt_sriov_pf_restart(struct xe_gt *gt);
> +size_t xe_gt_sriov_pf_mmio_vf_size(struct xe_gt *gt, unsigned int vfid);
> +int xe_gt_sriov_pf_mmio_vf_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size);
> +int xe_gt_sriov_pf_mmio_vf_restore(struct xe_gt *gt, unsigned int vfid,
> +				   const void *buf, size_t size);
> #else
> static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
> {
> @@ -38,6 +44,19 @@ static inline void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt)
> static inline void xe_gt_sriov_pf_restart(struct xe_gt *gt)
> {
> }
> +static inline size_t xe_gt_sriov_pf_mmio_vf_size(struct xe_gt *gt, unsigned int vfid)
> +{
> +	return 0;
> +}
> +static inline int xe_gt_sriov_pf_mmio_vf_save(struct xe_gt *gt, unsigned int vfid,
> +					      void *buf, size_t size)
> +{
> +	return -ENODEV;
> +}
> +static inline int xe_gt_sriov_pf_mmio_vf_restore(struct xe_gt *gt, unsigned int vfid,
> +						 const void *buf, size_t size)
> +{
> +	return -ENODEV;
> +}
> #endif
>
> #endif