Message-Id: <20210825034828.12927-7-alex.sierra@amd.com>
Date: Tue, 24 Aug 2021 22:48:20 -0500
From: Alex Sierra <alex.sierra@....com>
To: akpm@...ux-foundation.org, Felix.Kuehling@....com,
linux-mm@...ck.org, rcampbell@...dia.com,
linux-ext4@...r.kernel.org, linux-xfs@...r.kernel.org
Cc: amd-gfx@...ts.freedesktop.org, dri-devel@...ts.freedesktop.org,
hch@....de, jgg@...dia.com, jglisse@...hat.com
Subject: [PATCH v1 06/14] drm/amdkfd: add SPM support for SVM
When the CPU is connected through XGMI, it has coherent
access to VRAM. In that case the pagemap range is taken
from the device's GMC aperture base rather than reserved
from the iomem resource tree. This range is used, together
with the device type (DEVICE_PRIVATE or DEVICE_PUBLIC),
to create the device page map region.
Signed-off-by: Alex Sierra <alex.sierra@....com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@....com>
---
v7:
Remove the lookup_resource() call, so the exported symbol for
that function is no longer required. Dropped the patch "kernel:
resource: lookup_resource as exported symbol".
---
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 29 +++++++++++++++---------
1 file changed, 18 insertions(+), 11 deletions(-)
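
For reviewers: after this patch, the range-selection part of
svm_migrate_init() boils down to the sketch below. This is an
illustration only, not code from the patch; svm_init_pgmap_range() is
a made-up helper name, while the fields it touches
(gmc.xgmi.connected_to_cpu, gmc.aper_base, gmc.aper_size) and the
MEMORY_DEVICE_PUBLIC type come from this series:

	#include <linux/memremap.h>
	#include <linux/ioport.h>
	/* driver-internal headers (amdgpu.h etc.) assumed */

	/* Hypothetical helper, for illustration only. */
	static int svm_init_pgmap_range(struct amdgpu_device *adev,
					struct dev_pagemap *pgmap,
					unsigned long size,
					struct resource **res)
	{
		*res = NULL;

		if (adev->gmc.xgmi.connected_to_cpu) {
			/* Coherent XGMI link: expose VRAM through the GMC
			 * aperture as DEVICE_PUBLIC pages; nothing needs to
			 * be reserved from iomem_resource.
			 */
			pgmap->range.start = adev->gmc.aper_base;
			pgmap->range.end = adev->gmc.aper_base +
					   adev->gmc.aper_size - 1;
			pgmap->type = MEMORY_DEVICE_PUBLIC;
			return 0;
		}

		/* No coherent link: reserve a free physical address range
		 * and expose VRAM as DEVICE_PRIVATE pages.
		 */
		*res = devm_request_free_mem_region(adev->dev,
						    &iomem_resource, size);
		if (IS_ERR(*res))
			return -ENOMEM;

		pgmap->range.start = (*res)->start;
		pgmap->range.end = (*res)->end;
		pgmap->type = MEMORY_DEVICE_PRIVATE;
		return 0;
	}

Either way, the common tail of svm_migrate_init() still sets
nr_range, ops, owner and flags before calling devm_memremap_pages(),
as the second hunk below shows.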
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 47ee9a895cd2..dd245699479f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -865,7 +865,7 @@ int svm_migrate_init(struct amdgpu_device *adev)
 {
 	struct kfd_dev *kfddev = adev->kfd.dev;
 	struct dev_pagemap *pgmap;
-	struct resource *res;
+	struct resource *res = NULL;
 	unsigned long size;
 	void *r;
 
@@ -880,19 +880,25 @@ int svm_migrate_init(struct amdgpu_device *adev)
 	 * should remove reserved size
 	 */
 	size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
-	res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
-	if (IS_ERR(res))
-		return -ENOMEM;
+	if (adev->gmc.xgmi.connected_to_cpu) {
+		pgmap->range.start = adev->gmc.aper_base;
+		pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
+		pgmap->type = MEMORY_DEVICE_PUBLIC;
+	} else {
+		res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
+		if (IS_ERR(res))
+			return -ENOMEM;
+		pgmap->range.start = res->start;
+		pgmap->range.end = res->end;
+		pgmap->type = MEMORY_DEVICE_PRIVATE;
+	}
 
-	pgmap->type = MEMORY_DEVICE_PRIVATE;
 	pgmap->nr_range = 1;
-	pgmap->range.start = res->start;
-	pgmap->range.end = res->end;
 	pgmap->ops = &svm_migrate_pgmap_ops;
 	pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
-	pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
+	pgmap->flags = 0;
 	r = devm_memremap_pages(adev->dev, pgmap);
-	if (IS_ERR(r)) {
+	if (res && IS_ERR(r)) {
 		pr_err("failed to register HMM device memory\n");
 		devm_release_mem_region(adev->dev, res->start,
 					res->end - res->start + 1);
@@ -914,6 +920,7 @@ void svm_migrate_fini(struct amdgpu_device *adev)
 	struct dev_pagemap *pgmap = &adev->kfd.dev->pgmap;
 
 	devm_memunmap_pages(adev->dev, pgmap);
-	devm_release_mem_region(adev->dev, pgmap->range.start,
-				pgmap->range.end - pgmap->range.start + 1);
+	if (pgmap->type == MEMORY_DEVICE_PRIVATE)
+		devm_release_mem_region(adev->dev, pgmap->range.start,
+					pgmap->range.end - pgmap->range.start + 1);
 }
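
A note on the teardown and flags changes above: only the
DEVICE_PRIVATE path requests a region from iomem_resource, so
svm_migrate_fini() now keys devm_release_mem_region() on pgmap->type;
the XGMI path has nothing to release. Also,
MIGRATE_VMA_SELECT_DEVICE_PRIVATE, which the old code stored in
pgmap->flags, is a migrate_vma selector flag rather than a
dev_pagemap flag, so zeroing pgmap->flags here is correct. A minimal
sketch of where that selector actually belongs (vma, start, end and
ret are placeholders, not code from this patch):

	/* Selector flags go in struct migrate_vma, consumed by
	 * migrate_vma_setup(), not in dev_pagemap.flags.
	 */
	struct migrate_vma migrate = {
		.vma		= vma,
		.start		= start,	/* CPU VA range to migrate */
		.end		= end,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
		.pgmap_owner	= SVM_ADEV_PGMAP_OWNER(adev),
	};
	int ret = migrate_vma_setup(&migrate);	/* collect candidate pages */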
--
2.32.0