Message-ID: <20b283c6-d75d-400c-8955-851534b2f4f9@amd.com>
Date: Wed, 28 Jan 2026 00:08:42 -0500
From: "Kuehling, Felix" <felix.kuehling@....com>
To: Jordan Niethe <jniethe@...dia.com>, linux-mm@...ck.org
Cc: balbirs@...dia.com, matthew.brost@...el.com, akpm@...ux-foundation.org,
linux-kernel@...r.kernel.org, dri-devel@...ts.freedesktop.org,
david@...hat.com, ziy@...dia.com, apopple@...dia.com,
lorenzo.stoakes@...cle.com, lyude@...hat.com, dakr@...nel.org,
airlied@...il.com, simona@...ll.ch, rcampbell@...dia.com,
mpenttil@...hat.com, jgg@...dia.com, willy@...radead.org,
linuxppc-dev@...ts.ozlabs.org, intel-xe@...ts.freedesktop.org, jgg@...pe.ca,
jhubbard@...dia.com
Subject: Re: [PATCH v3 02/13] drm/amdkfd: Use migrate pfns internally
On 2026-01-23 01:22, Jordan Niethe wrote:
> A future change will remove device private pages from the physical
> address space. This will mean that device private pages no longer have a
> pfn.
>
> A MIGRATE_PFN flag will be introduced that distinguishes mpfns
> containing a pfn from mpfns containing an offset into device private
> memory.
>
> Replace uses of pfns and page_to_pfn() with mpfns and
> migrate_pfn_to_page() to prepare for handling this distinction. This
> lets MEMORY_DEVICE_PRIVATE and MEMORY_DEVICE_COHERENT devices continue
> to share the same code paths.
>
> Signed-off-by: Jordan Niethe <jniethe@...dia.com>
Reviewed-by: Felix Kuehling <felix.kuehling@....com>
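
As a side note for anyone tracing the conversion: an mpfn packs the pfn
above a few flag bits, so carrying mpfns everywhere costs nothing but a
shift. A minimal sketch of the encode/decode helpers, modeled on
include/linux/migrate.h in current mainline (the exact shift and flag
values are mainline's and may change as this series evolves):

    #define MIGRATE_PFN_VALID	(1UL << 0)
    #define MIGRATE_PFN_MIGRATE	(1UL << 1)
    #define MIGRATE_PFN_SHIFT	6

    /* Encode a pfn as an mpfn, marking the entry valid. */
    static inline unsigned long migrate_pfn(unsigned long pfn)
    {
    	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
    }

    /* Decode an mpfn; entries without a valid pfn yield NULL. */
    static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
    {
    	if (!(mpfn & MIGRATE_PFN_VALID))
    		return NULL;
    	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
    }

With everything held in mpfn form, the planned MIGRATE_PFN flag can
later mark entries whose payload is a device private offset rather than
a pfn, and only helpers like these need to learn about it.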
> ---
> v2:
> - New to series
> v3:
> - No change
> ---
> drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 15 +++++++--------
> drivers/gpu/drm/amd/amdkfd/kfd_migrate.h | 2 +-
> 2 files changed, 8 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> index 1f03cf7342a5..3dd7a35d19f7 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> @@ -210,17 +210,17 @@ svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
> }
>
> unsigned long
> -svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
> +svm_migrate_addr_to_mpfn(struct amdgpu_device *adev, unsigned long addr)
> {
> - return (addr + adev->kfd.pgmap.range.start) >> PAGE_SHIFT;
> + return migrate_pfn((addr + adev->kfd.pgmap.range.start) >> PAGE_SHIFT);
> }
>
> static void
> -svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
> +svm_migrate_get_vram_page(struct svm_range *prange, unsigned long mpfn)
> {
> struct page *page;
>
> - page = pfn_to_page(pfn);
> + page = migrate_pfn_to_page(mpfn);
> svm_range_bo_ref(prange->svm_bo);
> page->zone_device_data = prange->svm_bo;
> zone_device_page_init(page, 0);
> @@ -231,7 +231,7 @@ svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
> {
> struct page *page;
>
> - page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
> + page = migrate_pfn_to_page(svm_migrate_addr_to_mpfn(adev, addr));
> unlock_page(page);
> put_page(page);
> }
> @@ -241,7 +241,7 @@ svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
> {
> unsigned long addr;
>
> - addr = page_to_pfn(page) << PAGE_SHIFT;
> + addr = (migrate_pfn_from_page(page) >> MIGRATE_PFN_SHIFT) << PAGE_SHIFT;
> return (addr - adev->kfd.pgmap.range.start);
> }
>
> @@ -307,9 +307,8 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
>
> if (migrate->src[i] & MIGRATE_PFN_MIGRATE) {
> dst[i] = cursor.start + (j << PAGE_SHIFT);
> - migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
> + migrate->dst[i] = svm_migrate_addr_to_mpfn(adev, dst[i]);
> svm_migrate_get_vram_page(prange, migrate->dst[i]);
> - migrate->dst[i] = migrate_pfn(migrate->dst[i]);
> mpages++;
> }
> spage = migrate_pfn_to_page(migrate->src[i]);
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
> index 2b7fd442d29c..a80b72abe1e0 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
> @@ -48,7 +48,7 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
> uint32_t trigger, struct page *fault_page);
>
> unsigned long
> -svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);
> +svm_migrate_addr_to_mpfn(struct amdgpu_device *adev, unsigned long addr);
>
> #endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */
>
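The new decode in svm_migrate_addr() above is just the encode run in
reverse. A small self-contained userspace model of the round trip, for
illustration only (PAGE_SHIFT, the range start, and the constants are
assumed values; the migrate_pfn_from_page() helper from this series is
inlined here as plain arithmetic):

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT		12	/* assuming 4K pages */
    #define MIGRATE_PFN_VALID	(1UL << 0)
    #define MIGRATE_PFN_SHIFT	6

    /* Stand-in for adev->kfd.pgmap.range.start. */
    static const unsigned long range_start = 0x40000000UL;

    /* Models svm_migrate_addr_to_mpfn(): VRAM offset -> mpfn. */
    static unsigned long addr_to_mpfn(unsigned long addr)
    {
    	unsigned long pfn = (addr + range_start) >> PAGE_SHIFT;

    	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
    }

    /* Models svm_migrate_addr(): mpfn -> VRAM offset. */
    static unsigned long mpfn_to_addr(unsigned long mpfn)
    {
    	unsigned long addr = (mpfn >> MIGRATE_PFN_SHIFT) << PAGE_SHIFT;

    	return addr - range_start;
    }

    int main(void)
    {
    	unsigned long addr = 0x42UL << PAGE_SHIFT; /* page-aligned offset */
    	unsigned long mpfn = addr_to_mpfn(addr);

    	assert(mpfn_to_addr(mpfn) == addr);	/* round trip is lossless */
    	printf("addr 0x%lx <-> mpfn 0x%lx\n", addr, mpfn);
    	return 0;
    }

Because the flag bits sit below MIGRATE_PFN_SHIFT, they fall away on
decode and a page-aligned address always survives the round trip.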