Message-ID: <eb4d6982-e98d-48f0-a69d-617218686514@intel.com>
Date: Fri, 31 Oct 2025 17:59:12 +0100
From: Michal Wajdeczko <michal.wajdeczko@...el.com>
To: Michał Winiarski <michal.winiarski@...el.com>, "Alex
Williamson" <alex@...zbot.org>, Lucas De Marchi <lucas.demarchi@...el.com>,
Thomas Hellström <thomas.hellstrom@...ux.intel.com>,
"Rodrigo Vivi" <rodrigo.vivi@...el.com>, Jason Gunthorpe <jgg@...pe.ca>,
Yishai Hadas <yishaih@...dia.com>, Kevin Tian <kevin.tian@...el.com>, Shameer
Kolothum <skolothumtho@...dia.com>, <intel-xe@...ts.freedesktop.org>,
<linux-kernel@...r.kernel.org>, <kvm@...r.kernel.org>, Matthew Brost
<matthew.brost@...el.com>
CC: <dri-devel@...ts.freedesktop.org>, Jani Nikula
<jani.nikula@...ux.intel.com>, Joonas Lahtinen
<joonas.lahtinen@...ux.intel.com>, Tvrtko Ursulin <tursulin@...ulin.net>,
David Airlie <airlied@...il.com>, Simona Vetter <simona@...ll.ch>, "Lukasz
Laguna" <lukasz.laguna@...el.com>, Christoph Hellwig <hch@...radead.org>
Subject: Re: [PATCH v3 17/28] drm/xe/pf: Add helpers for VF GGTT migration
data handling
On 10/30/2025 9:31 PM, Michał Winiarski wrote:
> In an upcoming change, the VF GGTT migration data will be handled as
> part of VF control state machine. Add the necessary helpers to allow the
> migration data transfer to/from the HW GGTT resource.
>
> Signed-off-by: Michał Winiarski <michal.winiarski@...el.com>
> ---
> drivers/gpu/drm/xe/xe_ggtt.c | 104 +++++++++++++++++++++
> drivers/gpu/drm/xe/xe_ggtt.h | 4 +
> drivers/gpu/drm/xe/xe_ggtt_types.h | 2 +
> drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 52 +++++++++++
> drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h | 5 +
> 5 files changed, 167 insertions(+)
>
> diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
> index 20d226d90c50f..00ddb4f013466 100644
> --- a/drivers/gpu/drm/xe/xe_ggtt.c
> +++ b/drivers/gpu/drm/xe/xe_ggtt.c
> @@ -151,6 +151,14 @@ static void xe_ggtt_set_pte_and_flush(struct xe_ggtt *ggtt, u64 addr, u64 pte)
> ggtt_update_access_counter(ggtt);
> }
>
> +static u64 xe_ggtt_get_pte(struct xe_ggtt *ggtt, u64 addr)
> +{
> + xe_tile_assert(ggtt->tile, !(addr & XE_PTE_MASK));
> + xe_tile_assert(ggtt->tile, addr < ggtt->size);
> +
> + return readq(&ggtt->gsm[addr >> XE_PTE_SHIFT]);
> +}
> +
> static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
> {
> u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
> @@ -233,16 +241,19 @@ void xe_ggtt_might_lock(struct xe_ggtt *ggtt)
> static const struct xe_ggtt_pt_ops xelp_pt_ops = {
> .pte_encode_flags = xelp_ggtt_pte_flags,
> .ggtt_set_pte = xe_ggtt_set_pte,
> + .ggtt_get_pte = xe_ggtt_get_pte,
> };
>
> static const struct xe_ggtt_pt_ops xelpg_pt_ops = {
> .pte_encode_flags = xelpg_ggtt_pte_flags,
> .ggtt_set_pte = xe_ggtt_set_pte,
> + .ggtt_get_pte = xe_ggtt_get_pte,
> };
>
> static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
> .pte_encode_flags = xelpg_ggtt_pte_flags,
> .ggtt_set_pte = xe_ggtt_set_pte_and_flush,
> + .ggtt_get_pte = xe_ggtt_get_pte,
> };
>
> static void __xe_ggtt_init_early(struct xe_ggtt *ggtt, u32 reserved)
> @@ -889,6 +900,20 @@ u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare)
> return max_hole;
> }
>
> +/**
> + * xe_ggtt_node_pt_size() - Convert GGTT node size to its page table entries size.
> + * @node: the &xe_ggtt_node
> + *
> + * Return: GGTT node page table entries size in bytes.
> + */
> +size_t xe_ggtt_node_pt_size(const struct xe_ggtt_node *node)
> +{
> + if (!node)
> + return 0;
> +
> + return node->base.size / XE_PAGE_SIZE * sizeof(u64);
> +}
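For reference, a minimal sketch of this conversion with hypothetical numbers
(assuming XE_PAGE_SIZE == SZ_4K), not part of the patch:

	/* a 2 MiB GGTT node covers 2M / 4K = 512 pages, i.e. 512 u64 PTEs */
	size_t pt_size = SZ_2M / XE_PAGE_SIZE * sizeof(u64);	/* 4096 bytes */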
> +
> #ifdef CONFIG_PCI_IOV
> static u64 xe_encode_vfid_pte(u16 vfid)
> {
> @@ -930,6 +955,85 @@ void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid)
> xe_ggtt_assign_locked(node->ggtt, &node->base, vfid);
> mutex_unlock(&node->ggtt->lock);
> }
> +
> +/**
> + * xe_ggtt_node_save() - Save a &xe_ggtt_node to a buffer.
> + * @node: the &xe_ggtt_node to be saved
> + * @dst: destination buffer
> + * @size: destination buffer size in bytes
> + * @vfid: VF identifier
> + *
> + * Return: 0 on success or a negative error code on failure.
> + */
> +int xe_ggtt_node_save(struct xe_ggtt_node *node, void *dst, size_t size, u16 vfid)
> +{
> + struct xe_ggtt *ggtt;
> + u64 start, end;
> + u64 *buf = dst;
> + u64 pte;
> +
> + if (!node)
> + return -ENOENT;
> +
> + guard(mutex)(&node->ggtt->lock);
> +
> + ggtt = node->ggtt;
> + start = node->base.start;
> + end = start + size - 1;
As you've already noticed, this 'size' (the PT size) is the right size to calculate the 'end'.
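For concreteness, a hedged illustration of the two sizes in play here
(hypothetical numbers, continuing the 2 MiB node example from above,
XE_PAGE_SIZE == SZ_4K):

	u64 start = node->base.start;
	u64 end_from_pt_size   = start + xe_ggtt_node_pt_size(node) - 1; /* start + 4K - 1 */
	u64 end_from_node_size = start + node->base.size - 1;            /* start + 2M - 1 */

The two candidate 'end' values differ by a factor of XE_PAGE_SIZE / sizeof(u64).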
> +
> + if (xe_ggtt_node_pt_size(node) != size)
> + return -EINVAL;
> +
> + while (start < end) {
> + pte = ggtt->pt_ops->ggtt_get_pte(ggtt, start);
> + if (vfid != u64_get_bits(pte, GGTT_PTE_VFID))
> + return -EPERM;
> +
> + *buf++ = u64_replace_bits(pte, 0, GGTT_PTE_VFID);
> + start += XE_PAGE_SIZE;
> + }
> +
> + return 0;
> +}
> +
> +/**
> + * xe_ggtt_node_load() - Load a &xe_ggtt_node from a buffer.
> + * @node: the &xe_ggtt_node to be loaded
> + * @src: source buffer
> + * @size: source buffer size in bytes
> + * @vfid: VF identifier
> + *
> + * Return: 0 on success or a negative error code on failure.
> + */
> +int xe_ggtt_node_load(struct xe_ggtt_node *node, const void *src, size_t size, u16 vfid)
> +{
> + u64 vfid_pte = xe_encode_vfid_pte(vfid);
> + const u64 *buf = src;
> + struct xe_ggtt *ggtt;
> + u64 start, end;
> +
> + if (!node)
> + return -ENOENT;
> +
> + guard(mutex)(&node->ggtt->lock);
> +
> + ggtt = node->ggtt;
> + start = node->base.start;
> + end = start + size - 1;
ditto
> +
> + if (xe_ggtt_node_pt_size(node) != size)
> + return -EINVAL;
> +
> + while (start < end) {
> + vfid_pte = u64_replace_bits(*buf++, vfid, GGTT_PTE_VFID);
> + ggtt->pt_ops->ggtt_set_pte(ggtt, start, vfid_pte);
> + start += XE_PAGE_SIZE;
> + }
> + xe_ggtt_invalidate(ggtt);
> +
> + return 0;
> +}
> +
> #endif
>
> /**
> diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
> index 75fc7a1efea76..1edf27608d39a 100644
> --- a/drivers/gpu/drm/xe/xe_ggtt.h
> +++ b/drivers/gpu/drm/xe/xe_ggtt.h
> @@ -41,8 +41,12 @@ u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare);
> int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p);
> u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer *p);
>
> +size_t xe_ggtt_node_pt_size(const struct xe_ggtt_node *node);
> +
> #ifdef CONFIG_PCI_IOV
> void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid);
> +int xe_ggtt_node_save(struct xe_ggtt_node *node, void *dst, size_t size, u16 vfid);
> +int xe_ggtt_node_load(struct xe_ggtt_node *node, const void *src, size_t size, u16 vfid);
> #endif
>
> #ifndef CONFIG_LOCKDEP
> diff --git a/drivers/gpu/drm/xe/xe_ggtt_types.h b/drivers/gpu/drm/xe/xe_ggtt_types.h
> index c5e999d58ff2a..dacd796f81844 100644
> --- a/drivers/gpu/drm/xe/xe_ggtt_types.h
> +++ b/drivers/gpu/drm/xe/xe_ggtt_types.h
> @@ -78,6 +78,8 @@ struct xe_ggtt_pt_ops {
> u64 (*pte_encode_flags)(struct xe_bo *bo, u16 pat_index);
> /** @ggtt_set_pte: Directly write into GGTT's PTE */
> void (*ggtt_set_pte)(struct xe_ggtt *ggtt, u64 addr, u64 pte);
> + /** @ggtt_get_pte: Directly read from GGTT's PTE */
> + u64 (*ggtt_get_pte)(struct xe_ggtt *ggtt, u64 addr);
> };
>
> #endif
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
> index c0c0215c07036..55444883f2ac3 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
> @@ -726,6 +726,58 @@ int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
> return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
> }
>
> +/**
> + * xe_gt_sriov_pf_config_ggtt_save() - Save a VF provisioned GGTT data into a buffer.
> + * @gt: the &xe_gt
> + * @vfid: VF identifier (can't be 0)
> + * @buf: the GGTT data destination buffer (or NULL to query the buf size)
> + * @size: the size of the buffer (or 0 to query the buf size)
> + *
> + * This function can only be called on PF.
> + *
> + * Return: size of the buffer needed to save GGTT data if querying,
> + * 0 on successful save or a negative error code on failure.
> + */
> +ssize_t xe_gt_sriov_pf_config_ggtt_save(struct xe_gt *gt, unsigned int vfid,
> + void *buf, size_t size)
> +{
> + struct xe_ggtt_node *node;
> +
> + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> + xe_gt_assert(gt, vfid);
> + xe_gt_assert(gt, !(!buf ^ !size));
> +
> + guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
> + node = pf_pick_vf_config(gt, vfid)->ggtt_region;
> +
> + if (!buf)
> + return xe_ggtt_node_pt_size(node);
> +
> + return xe_ggtt_node_save(node, buf, size, vfid);
> +}
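Just for reference, a minimal sketch (not part of the patch, hypothetical
caller context with 'gt' and 'vfid' in scope, error handling trimmed) of the
query-then-save pattern described in the kernel-doc above:

	ssize_t size;
	void *buf;
	int err;

	/* NULL buffer queries the required buffer size */
	size = xe_gt_sriov_pf_config_ggtt_save(gt, vfid, NULL, 0);
	if (size <= 0)
		return size;

	buf = kvmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* a non-NULL buffer of the queried size performs the actual save */
	err = xe_gt_sriov_pf_config_ggtt_save(gt, vfid, buf, size);

The restore side would later hand the same buffer back through
xe_gt_sriov_pf_config_ggtt_restore(gt, vfid, buf, size).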
> +
> +/**
> + * xe_gt_sriov_pf_config_ggtt_restore() - Restore a VF provisioned GGTT data from a buffer.
> + * @gt: the &xe_gt
> + * @vfid: VF identifier (can't be 0)
> + * @buf: the GGTT data source buffer
> + * @size: the size of the buffer
> + *
> + * This function can only be called on PF.
> + *
> + * Return: 0 on success or a negative error code on failure.
> + */
> +int xe_gt_sriov_pf_config_ggtt_restore(struct xe_gt *gt, unsigned int vfid,
> + const void *buf, size_t size)
> +{
> + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
> + xe_gt_assert(gt, vfid);
> +
> + guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
> +
> + return xe_ggtt_node_load(pf_pick_vf_config(gt, vfid)->ggtt_region, buf, size, vfid);
> +}
> +
> static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
> {
> /* XXX: preliminary */
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
> index 513e6512a575b..0293ba98eb6df 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
> @@ -61,6 +61,11 @@ ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *bu
> int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
> const void *buf, size_t size);
>
> +ssize_t xe_gt_sriov_pf_config_ggtt_save(struct xe_gt *gt, unsigned int vfid,
> + void *buf, size_t size);
> +int xe_gt_sriov_pf_config_ggtt_restore(struct xe_gt *gt, unsigned int vfid,
> + const void *buf, size_t size);
> +
> bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid);
>
> int xe_gt_sriov_pf_config_init(struct xe_gt *gt);